def cyclomaticComplexity(code):
    for member in cc_visit(code):
        blockType = member.letter
        blockComplexity = member.complexity
        blockRank = cc_rank(blockComplexity)
        blockFullName = member.fullname
    return [average_complexity(cc_visit(code)),
            cc_rank(average_complexity(cc_visit(code)))]
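# A minimal, hypothetical usage sketch for the helper above, assuming
# `from radon.complexity import cc_visit, cc_rank, average_complexity`;
# the sample source string is invented for illustration.
sample_code = '''
def absolute(x):
    if x < 0:
        return -x
    return x
'''
print(cyclomaticComplexity(sample_code))  # e.g. [2.0, 'A'] for this sample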
def compute_complexity(source):
    result = []
    blocks = cc_visit(source)
    mix_path = mi_visit(source, True)
    for func in blocks:
        result.append(func.name + ": Rank:" + cc_rank(func.complexity))
    return result
def get_code_complexity_measures(n1, n2):
    globzipfiles = glob.glob('../../../NewData_html/Nova/github_archives/nova-*.zip')
    for zipfile2 in globzipfiles[int(n1):int(n2)]:
        print zipfile2
        commit_id = zipfile2.split('/')[6][:-4]
        outf1 = open('/media/mukherjee/OpenStack_Ext_Ha/Nova/Functionname_complexity/' + str(commit_id) + '.txt', 'w')
        outf2 = open('/media/mukherjee/OpenStack_Ext_Ha/Nova/Filename_complexity/' + str(commit_id) + '.txt', 'w')
        print >> outf1, 'filename|commit_id|function_name|function_letter|function_startline|function_endline|function_complexity'
        print >> outf2, 'filename|commit_id|MI|LOC|LLOC|SLOC|multi|blank|single_comments|halstead_volume|cyclomatic_complexity'
        with zipfile.ZipFile(zipfile2) as z:
            for filename in z.namelist():
                if filename.endswith(".py"):
                    if not os.path.isdir(filename):
                        # iter through filenames starting from the current directory
                        # you can pass ignore or exclude patterns here (as strings)
                        # for example: ignore='tests,docs'
                        with z.open(filename) as fobj:
                            source = fobj.read()
                            # get cc blocks
                            try:
                                blocks = cc_visit(source)
                                if len(blocks) > 0:
                                    for k in blocks:
                                        function_startline = k.lineno
                                        function_endline = k.endline
                                        function_letter = k.letter
                                        function_classname = k.name
                                        function_complexity = k.complexity
                                        print >> outf1, '%s|%s|%s|%s|%s|%s|%s' % (
                                            filename, commit_id, function_classname, function_letter,
                                            function_startline, function_endline, function_complexity)
                                # get MI score
                                mi = mi_visit(source, True)
                                # get raw metrics
                                raw = analyze(source)
                                LOC = raw[0]
                                LLOC = raw[1]
                                SLOC = raw[2]
                                multi = raw[4]
                                blank = raw[5]
                                single_comments = raw[6]
                                # get MI parameters
                                mi_par = mi_parameters(source)
                                halstead_volume = mi_par[0]
                                cyclomatic_complexity = mi_par[1]
                                print >> outf2, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % (
                                    filename, commit_id, mi, LOC, LLOC, SLOC, multi, blank,
                                    single_comments, halstead_volume, cyclomatic_complexity)
                            except SyntaxError, e:
                                continue
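# A hypothetical sketch of the radon calls used in the pipeline above, run on
# a made-up source string, to show the shape of each return value; the field
# order shown for analyze() and mi_parameters() is the one documented by
# recent radon versions.
from radon.complexity import cc_visit
from radon.metrics import mi_visit, mi_parameters
from radon.raw import analyze

sample = "def f(x):\n    if x > 0:\n        return x\n    return -x\n"

print(cc_visit(sample)[0].complexity)   # cyclomatic complexity of f (2 here)
print(mi_visit(sample, True))           # maintainability index, 0-100 scale
print(mi_parameters(sample))            # (Halstead volume, complexity, LLOC, comment %)
print(analyze(sample))                  # Module(loc=..., lloc=..., sloc=..., comments=..., multi=..., blank=..., single_comments=...)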
def calc_radon(path):
    # return iter of blocks
    code = open(path).read()
    try:
        return cc_visit(code)
    except SyntaxError:
        # logger.warning('%s: %s', path, error)
        return []
def calculate_obj_complexity(filetext):
    complexities = []
    try:
        complexity_objs = cc_visit(filetext)
        for obj in complexity_objs:
            complexities.append(obj.complexity)
        return sum(complexities)
    except Exception:
        return 0.0
def compute_complexity(source):
    result = []
    # get cc blocks
    blocks = cc_visit(source)
    # get MI score
    mi = mi_visit(source, True)
    for func in blocks:
        result.append(func.name + "- CC Rank:" + cc_rank(func.complexity))
    return result
def compute_complexity(source):
    result = []
    # get complexity blocks
    blocks = cc_visit(source)
    # get MI score
    mi = mi_visit(source, True)
    for slave in blocks:
        result.append(slave.name + "-Rank:" + cc_rank(slave.complexity))
    return result
def test_complexity():
    """All source and test files should have a low cyclomatic complexity"""
    file_paths = glob.iglob('*/*.py')
    for file_path in file_paths:
        with open(file_path, 'r') as file_obj:
            blocks = radon.cc_visit(file_obj.read())
        for block in blocks:
            fail_msg = '{} ({}) has a cyclomatic complexity of {}'.format(
                block.name, file_path, block.complexity)
            yield nose.assert_less_equal, block.complexity, 10, fail_msg
def _complexity(self, filepaths):
    all_complexity = 0
    for filepath in filepaths:
        file_obj = open(join(self.repo_dir_path, filepath))
        results = sorted_results(cc_visit(file_obj.read()))
        complexities = [i.complexity for i in results]
        complexity = sum(complexities) / (len(complexities) or 1)
        all_complexity += complexity
    return all_complexity, cc_rank(all_complexity)
def getCC(filepath):
    # get cyclomatic complexity of file
    with open(filepath, "r") as myfile:
        data = myfile.read()
    # cc = cc_visit(data)
    # return sum(function[len(function)-1] for function in cc)
    try:
        cc = cc_visit(data)
        return sum(function[len(function) - 1] for function in cc)
    except Exception:
        return 0
def test_complexity():
    file_paths = glob.iglob("*/*.py")
    for file_path in file_paths:
        with open(file_path, "r") as file:
            blocks = radon.cc_visit(file.read())
        for block in blocks:
            test_doc = "{} ({}) should have a low cyclomatic complexity score"
            test_complexity.__doc__ = test_doc.format(block.name, file_path)
            fail_msg = "{} ({}) has a cyclomatic complexity of {}".format(block.name, file_path, block.complexity)
            yield nose.assert_less_equal, block.complexity, 10, fail_msg
def test_complexity():
    file_paths = glob.iglob('*/*.py')
    for file_path in file_paths:
        with open(file_path, 'r') as file:
            blocks = radon.cc_visit(file.read())
        for block in blocks:
            test_doc = '{} ({}) should have a low cyclomatic complexity score'
            test_complexity.__doc__ = test_doc.format(block.name, file_path)
            fail_msg = '{} ({}) has a cyclomatic complexity of {}'.format(
                block.name, file_path, block.complexity)
            yield nose.assert_less_equal, block.complexity, 10, fail_msg
def test_complexity():
    """All source file functions should have a low cyclomatic complexity."""
    file_paths = itertools.chain(
        glob.iglob('automata/*/*.py'),
        glob.iglob('tests/*.py'))
    for file_path in file_paths:
        with open(file_path, 'r') as file_obj:
            blocks = radon.cc_visit(file_obj.read())
        for block in blocks:
            fail_msg = '{} ({}) has a cyclomatic complexity of {}'.format(
                block.name, file_path, block.complexity)
            yield nose.assert_less_equal, block.complexity, 10, fail_msg
def calculate_average_cc(filetext):
    complexities = []
    complexity_objs = cc_visit(filetext)
    for obj in complexity_objs:
        complexities.append(obj.complexity)
    total = sum(complexities)
    try:
        return total / len(complexities)
    except ZeroDivisionError:
        return 0.0
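# A hypothetical usage sketch for calculate_average_cc, assuming
# `from radon.complexity import cc_visit`; the module text is made up.
module_text = '''
def first(x):
    return x

def second(x):
    if x:
        return 1
    return 0
'''
print(calculate_average_cc(module_text))  # e.g. 1.5 (block complexities 1 and 2)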
def analyze_cc(*paths):
    """Analyze all Python files in the provided paths and return a dictionary
    mapping each filename to a list of its components (functions or classes)."""
    result = {}
    for name in iter_filenames(paths, []):
        with open(name) as fobj:
            try:
                results = cc_visit(fobj.read())
            except Exception as e:
                log('{0}\n{1}ERROR: {2}', name, ' ' * 4, str(e))
                continue
            result[name] = results
    return result
async def analyze(self, tup: NativeBlobMetricInput) -> Iterable[Metric]:
    try:
        cc_data = cc_visit(tup.blob.data)
    except (SyntaxError, UnicodeDecodeError):
        return []  # TODO get an error output?
    result = [
        Metric(
            self.name,
            subobject.complexity,
            False,
            ObjectIdentifier(tup.blob.id, tup.path),
            subobject.fullname,
        )
        for subobject in cc_data
    ]
    return result
def collect_metrics(self, program_name):
    program_file = open(program_name, 'r')
    code = program_file.read()
    cc_response = cc_visit(code)
    raw_response = analyze(code)
    self.cyclomatic_complexity = None
    if cc_response:
        self.cyclomatic_complexity = cc_response[0].complexity
    self.source_lines_of_code = 0
    self.comments = 0
    if raw_response:
        self.source_lines_of_code = raw_response.sloc
        self.comments = raw_response.comments
    self.user_defined_functions = self.collect_user_defined_functions(code)
    self.program_name = program_name
    self.level = self.collect_level_being_used(code)
def CodeAnalysis(filepatch='.', methodname=''):
    comp = 0
    for filename in iter_filenames([filepatch]):
        print(str(filename))
        with open(filename) as fobj:
            source = fobj.read()
            blocks = cc_visit(source)
            print("Searching for " + methodname + " in " + filepatch)
            for i in blocks:
                print(i.name + ' - ' + str(i.complexity) + ' - ' + methodname)
                if (i.name == methodname):
                    comp = i.complexity
                    print('FOUND')
                    break
    return comp
def analyze_cc(paths, exclude, min, max, order_function):
    '''Analyze the files located under `paths`.

    :param paths: A list of paths to analyze.
    :param exclude: A comma-separated string of fnmatch patterns.
    :param min: The minimum rank to output.
    :param max: The maximum rank to output.
    :param order_function: Can be `SCORE`, `LINES` or `ALPHA`, to sort the
        results respectively by CC score, line number or name.'''
    for name in iter_filenames(paths, exclude):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read()), order_function)
                yield name, list(_filter_by_rank(results, min, max))
            except Exception as e:
                log(name, indent=1)
                log_error(e, indent=1)
                continue
def evaluate_code(kata_test, kata_implementation, kata_path):
    """
    Evaluate the code resulting from executing the kata_implementation stored at the given path.

    :param kata_test: the test to pass
    :param kata_implementation: the kata implementation
    :param kata_path: the kata script
    """
    time_consumption = timeit.timeit(lambda: kata_test(kata_implementation), number=10000)
    with open(kata_path, "r") as kata_file:
        kata_code = kata_file.read()
    maintainability_index = mi_visit(kata_code, False)
    cyclomatic_complexity = cc_visit(kata_code)[0].complexity
    return {
        "time": time_consumption,
        "mi": maintainability_index,
        "cc": cyclomatic_complexity
    }
def cc(min='A', max='F', show_complexity=False, average=False,
       exclude=None, order='SCORE', *paths):
    '''Analyze the given Python modules and compute Cyclomatic Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default complexity score is not displayed.

    :param min: The minimum complexity to display (default to A).
    :param max: The maximum complexity to display (default to F).
    :param show_complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Default to False.
    :param average: If True, at the end of the analysis display the average
        complexity. Default to False.
    :param paths: The modules or packages to analyze.
    '''
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    for name in iter_filenames(paths, exclude or []):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read()), order_function)
            except Exception as e:
                log('{0}\n{1}ERROR: {2}', name, ' ' * 4, str(e))
                continue
            cc, blocks = _print_cc_results(name, results, min, max, show_complexity)
            average_cc += cc
            analyzed += blocks
    if average and analyzed:
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}',
            RANKS_COLORS[ranked_cc], ranked_cc, cc, RESET)
def analyze_method(name, filename, commits):
    res = []
    print("Analyzing method")
    for c in tqdm(commits):
        g.execute(['git', 'checkout', c[1]])
        try:
            with open(filename) as f:
                code = f.read()
            r = cc_visit(code)  # Will raise SyntaxError if can't parse AST (invalid code has been pushed)
            if isinstance(r[0], radon.visitors.Class):
                method = next(m for m in r[0].methods if m.name == name)
            elif isinstance(r[0], radon.visitors.Function):
                method = next(f for f in r if f.name == name)
            res.append((c[0], method.complexity))
        except (FileNotFoundError, StopIteration, SyntaxError):
            res.append((c[0], "--"))
    return res
def analyze_cc(paths, exclude, ignore, order_function, no_assert):
    """Analyze the files located under `paths`.

    :param paths: A list of paths to analyze.
    :param exclude: A comma-separated string of fnmatch patterns.
    :param ignore: A comma-separated string of patterns to ignore.
    :param min: The minimum rank to output.
    :param max: The maximum rank to output.
    :param order_function: Can be `SCORE`, `LINES` or `ALPHA`, to sort the
        results respectively by CC score, line number or name.
    :param no_assert: If `True` assert statements will not be counted."""
    for name in iter_filenames(paths, exclude, ignore):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read(), no_assert=no_assert),
                                         order_function)
                yield name, results
            except Exception as e:
                log(name)
                log_error(e, indent=1)
                continue
def check_complexity(filepath, complex_val=10):
    """
    Check a file with the radon lib and get the complexity of its classes and functions.
    """
    with open(filepath, 'r') as _file:
        results = cc_visit(_file.read())
    reports = []
    for result in results:
        if result.complexity > complex_val:
            file_report = dict(file=filepath,
                               complexity=result.complexity,
                               message=get_complexity_message(result.complexity),
                               line_number=result.lineno,
                               object_name=result.name)
            if isinstance(result, radon_class):
                file_report['type'] = 'class'
            if isinstance(result, radon_func):
                file_report['type'] = 'function'
            reports.append(file_report)
    return reports
def analyze_cc(paths, exclude, ignore, order_function, no_assert):
    '''Analyze the files located under `paths`.

    :param paths: A list of paths to analyze.
    :param exclude: A comma-separated string of fnmatch patterns.
    :param ignore: A comma-separated string of patterns to ignore.
    :param min: The minimum rank to output.
    :param max: The maximum rank to output.
    :param order_function: Can be `SCORE`, `LINES` or `ALPHA`, to sort the
        results respectively by CC score, line number or name.
    :param no_assert: If `True` assert statements will not be counted.'''
    for name in iter_filenames(paths, exclude, ignore):
        with open(name) as fobj:
            try:
                results = sorted_results(
                    cc_visit(fobj.read(), no_assert=no_assert), order_function)
                yield name, results
            except Exception as e:
                log(name)
                log_error(e, indent=1)
                continue
def in_cc_report(module: str, source: str) -> list:
    """
    cyclomatic complexity for module "module"
    @param module: module name
    @param source: source of module
    @return list: of tuple (CC, length, type(C/F/M), module, class, method)
    """
    res = []
    # get cc blocks
    blocks = cc_visit(source)
    for blk in blocks:
        tt = str(blk).split()[0:5:2]
        tt[2] = tt[2].rjust(2)
        length = str(blk.endline - blk.lineno + 1).rjust(4)
        if tt[0] == 'C':
            res.append((tt[2], length, tt[0], module, tt[1], ''))
        elif tt[0] == 'F':
            res.append((tt[2], length, tt[0], module, '', tt[1]))
        else:
            res.append((tt[2], length, tt[0], module, *tt[1].split('.')))
    return res
def cyclomatic_complexity():
    """Compute the cyclomatic complexity of the code."""
    cc_list = []
    for actually_file in RESLUT2:
        with open(PATH + '\\' + actually_file, 'r') as open_file:
            try:
                open_file = open_file.read()
                complexity = cc_visit('''{}'''.format(open_file))
                for element in complexity:
                    split_text = str(element).split()
                    split_text_final_lvl = split_text[2].split('.')
                    if len(split_text_final_lvl) > 1:
                        if split_text_final_lvl[1] != '__init__':
                            cc_list.append(
                                str(split_text_final_lvl[1]) +
                                '[{}]'.format(split_text[4]))
                    else:
                        cc_list.append(
                            str(split_text_final_lvl[0]) +
                            '[{}]'.format(split_text[4]))
            except IndentationError as error:
                print(error)
    return cc_list
def program_complexity(projno, funcno):
    """
    Builds and returns two dictionaries mapping filename:cc and filename:mi.
    """
    cc_dic = {}
    mi_dic = {}
    path = os.getcwd() + \
        "/extracted_files/proj{0}_func{1}/".format(projno, funcno)
    for fname in os.listdir(path):
        if fname[-4:] == '.pyc':
            continue
        try:
            f = open(path + fname)
            content = f.read()
            blocks = cc_visit(content)
            mi = mi_visit(content, True)
            cc_dic[fname] = blocks[0].complexity
            mi_dic[fname] = mi
            f.close()
        except:
            print " ERROR: cannot find file " + path + fname
    return cc_dic, mi_dic
def compute_complexity(source):
    result = []
    blocks = cc_visit(source)
    for func in blocks:
        result.append(func.name + "- CC Rank:" + cc_rank(func.complexity))
    return result
def gobble(self, fobj):
    '''Analyze the content of the file object.'''
    r = cc_visit(fobj.read(), no_assert=self.config.no_assert)
    if self.config.show_closures:
        r = add_inner_blocks(r)
    return sorted_results(r, order=self.config.order)
    return fib(n - 1) + fib(n - 2)

class C(A, B):
    def fr(self):
        return 34
'''

a = ast.parse(mdef)
definitions = [n for n in ast.walk(a) if type(n) == ast.ClassDef]
inheritance_tree = {}
for i in definitions:
    inheritance_tree[i.name] = []
    print(i.name)
    for j in i.bases:
        if not j.id == "object":
            inheritance_tree[i.name].append(j.id)
            print("Inherited", j.id)

import pprint
from radon.complexity import cc_rank, cc_visit

val = cc_visit('''
class A(object):
    def meth(self):
        return sum(i for i in range(10) if i - 2 < 5)

class B(A):
    def thi(self):
        return sum(i for i in range(10) if i - 2 < 5)

def fib(n):
    if n < 2:
        return 1
    return fib(n - 1) + fib(n - 2)
''')

pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(val)
#print(val)
def gobble(self, fobj):
    '''Analyze the content of the file object.'''
    r = cc_visit(fobj.read(), no_assert=self.config.no_assert)
    return sorted_results(r, order=self.config.order)
def cyclomaticComplexity(self):
    cc = cc_visit(self.sourceFile)
    return average_complexity(cc)
def get_cyclomatic_complexity(content):
    '''
    Returns a list of blocks, each annotated with its cyclomatic complexity.
    A block is either a Function object or a Class object.
    '''
    return cc_visit(content)
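# A hypothetical usage sketch for get_cyclomatic_complexity; the source
# string below is invented, and the attributes printed (name, lineno,
# endline, complexity) are the ones radon's block objects expose.
source = '''
class Greeter:
    def greet(self, name):
        if name:
            return 'hello ' + name
        return 'hello'
'''
for block in get_cyclomatic_complexity(source):
    print(block.name, block.lineno, block.endline, block.complexity)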
if __name__ == '__main__':
    mi_dict = OrderedDict()
    param_dict = OrderedDict()
    print('1. METHODS OF PROJECT FILES SORTED BY CYCLOMATIC COMPLEXITY')
    for filename in iter_filenames(['.']):
        file_methods = []
        file_complexities = []
        methods_dictionary = OrderedDict()
        with open(filename) as fobj:
            source = fobj.read()
            # get cc blocks
            blocks = cc_visit(source)
            # get MI score
            mi = mi_visit(source, True)
            mi_dict[mi] = filename
            # get raw metrics
            raw = analyze(source)
            hal_vol, complexity, logic_lines, com_lines = mi_parameters(source)
            param_dict[complexity] = (complexity, filename, hal_vol, logic_lines, com_lines)
            # get metrics for each file
            file_complexity = sorted_results(blocks)
            try:
                file_methods.append(re.findall('(?<=.Function\(name=\')[^\']'
def fileComplexity(theFile):
    results = cc_visit(theFile)
    complexity = sum([i.complexity for i in results])
    return complexity