def compare_parameters(udb_before, udb_after, count):
    # Open both databases.
    dbBefore = understand.open(udb_before)
    dbAfter = understand.open(udb_after)
    root = etree.Element("root")
    doc = etree.SubElement(root, "doc")
    # Only function-like entities carry parameters.
    entsBefore = dbBefore.ents("function,method,procedure")
    entsAfter = dbAfter.ents("function,method,procedure")
    for funcB in sorted(entsBefore, key=sortKeyFunc):
        for funcA in sorted(entsAfter, key=sortKeyFunc):
            if funcB.name() == funcA.name():
                method = etree.SubElement(doc, "method", name=funcB.name())
                # Record every parameter whose declared type changed.
                for paramA in funcA.ents("Define", "Parameter"):
                    for paramB in funcB.ents("Define", "Parameter"):
                        if paramA == paramB and paramA.type() != paramB.type():
                            change = etree.SubElement(method, "change")
                            etree.SubElement(
                                change, "parameter",
                                oldtype=paramB.type(),
                                newtype=paramA.type()).text = str(paramB)
    tree = etree.ElementTree(root)
    tree.write("parameter%s.xml" % count)
    dbBefore.close()
    dbAfter.close()
def understand_dict_parsing(und_db_path1, und_db_path2):
    parent_db = understand.open(und_db_path1)
    current_db = understand.open(und_db_path2)
    # Retrieve a list of all entities:
    # - '~unresolved' skips entities that are declared but not defined
    # - '~volatile' keeps only non-volatile entities
    parent_db_dict = {}
    current_db_dict = {}
    for entity in parent_db.ents('~unresolved ~volatile'):
        key = str(entity.parent()) + '--' + str(entity.kind()) + '--' + entity.name()
        parent_db_dict[key] = entity
    for entity in current_db.ents('~unresolved ~volatile'):
        key = str(entity.parent()) + '--' + str(entity.kind()) + '--' + entity.name()
        current_db_dict[key] = entity
    print("Parent Keys")
    for key in sorted(parent_db_dict):
        print(key + ": " + str(parent_db_dict[key]))
    print("Current Keys")
    for key in sorted(current_db_dict):
        print(key + ": " + str(current_db_dict[key]))
    # Compare parent-to-current changes.
    match = 0
    no_match = 0
    not_in_parent_dict = 0
    not_in_commit_dict = 0
    match_ls = []
    no_match_ls = []
    not_in_parent_dict_ls = []
    not_in_commit_dict_ls = []
    for key in sorted(current_db_dict):
        if key not in parent_db_dict:
            not_in_parent_dict += 1
            print(key + " is in the current dictionary but not in the parent dictionary")
            not_in_parent_dict_ls.append(key)
            continue
        elif is_entity_match(parent_db_dict[key], current_db_dict[key]):
            print(key + " is a match.")
            match_ls.append(key)
            match += 1
        else:
            print(key + " is not a match")
            no_match += 1
            no_match_ls.append(key)
    for key in sorted(parent_db_dict):
        if key not in current_db_dict:
            not_in_commit_dict += 1
            print(key + " is in the parent dictionary but not in the current dictionary")
            not_in_commit_dict_ls.append(key)
    return (parent_db_dict, current_db_dict, match_ls, no_match_ls,
            not_in_parent_dict_ls, not_in_commit_dict_ls)
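# is_entity_match is used above but not defined in this snippet. A minimal
# sketch, assuming a "match" means the entity's source text is unchanged
# between the two databases (hypothetical helper; comparing types or metrics
# instead would fit the same call site):
def is_entity_match(parent_entity, current_entity):
    # Entity.contents() returns the source text associated with an entity.
    return parent_entity.contents() == current_entity.contents()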
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    insert_understand_in_path(arguments["--dllDir"])
    print("\r\n====== srcdiffplot @ https://github.com/sglebs/srccheck ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64 or equivalent")
        sys.exit(-1)
    try:
        db_before = understand.open(arguments["--before"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    try:
        db_after = understand.open(arguments["--after"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("Processing %s and %s" % (db_before.name(), db_after.name()))
    for plot_lambda in [plot_diff_file_metrics, plot_diff_class_metrics, plot_diff_routine_metrics]:
        plot_lambda(db_before, db_after, arguments)
    prj_metric_names = [metric.strip() for metric in arguments["--prjMetrics"].split(",")]
    prj_metric_names = [metric for metric in prj_metric_names if len(metric) > 0]
    all_metric_names, all_metric_values_before, all_metric_values_after, all_growth_rates = \
        collect_metric_names_with_values_and_growth(db_after, db_before, prj_metric_names)
    output_dir = arguments["--outputDir"]
    file_name = os.path.split(db_before.name())[-1] + "-" + os.path.split(db_after.name())[-1] + "-diff-kiviat.png"
    absolute_file_name = "%s%s%s" % (output_dir, os.sep, file_name)
    if len(all_metric_names) > 0:
        saved_file_name = save_kiviat_with_values_and_thresholds(
            all_metric_names, all_metric_values_after, all_metric_values_before,
            absolute_file_name, "Prj Metrics",
            thresholdslabel="before", valueslabel="after")
        if saved_file_name is not None:
            print("Saved %s" % saved_file_name)
    print_growth_rates(all_metric_names, all_growth_rates)
    rates_by_adjusted_metric_name = {"Prj %s growth rate" % metric_name: rate
                                     for metric_name, rate in zip(all_metric_names, all_growth_rates)}
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep, arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, rates_by_adjusted_metric_name)
    if csv_ok:
        print("+++ Growth ratio metrics saved to %s" % absolute_csv_path)
    else:
        print("\n*** Problems creating CSV file %s" % absolute_csv_path)
    post_metrics_to_sonar(arguments, rates_by_adjusted_metric_name)
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db_before.close()
    db_after.close()
def generatejson(new_udb_path, old_udb_path):
    """
    Generate the JSON object for the entities added, removed, and modified
    between two versions of the code.

    @param new_udb_path: path to the udb file pointing to the newer version of the code
    @param old_udb_path: path to the udb file pointing to the older version of the code
    @return: the JSON object with the additions, deletions, and modifications due to the patch
    """
    olddb = understand.open(old_udb_path)
    newdb = understand.open(new_udb_path)
    NB = True
    oldlist = elist(olddb)
    newlist = elist(newdb)
    larger, smaller = largelist(oldlist, newlist)
    exclusive_large = set(larger) - set(smaller)
    exclusive_small = set(smaller) - set(larger)
    if NB:
        added_entities = exclusive_large
        removed_entities = exclusive_small
    else:
        added_entities = exclusive_small
        removed_entities = exclusive_large
    added_entities_list = getentitylist(added_entities, newdb)
    removed_entities_list = getentitylist(removed_entities, olddb)
    old_methods_dict = dict(
        (x.longname(), x) for x in olddb.ents("function,method,procedure ~unknown ~unresolved"))
    new_methods_dict = dict(
        (x.longname(), x) for x in newdb.ents("function,method,procedure ~unknown ~unresolved"))
    modified_ent = getmodifiedmethods(new_methods_dict, old_methods_dict)
    print(added_entities_list)
    print(removed_entities_list)
    print(modified_ent)
    json_data = {
        'added': added_entities_list,
        'removed': removed_entities_list,
        'modified': modified_ent
    }
    olddb.close()
    newdb.close()
    return json.dumps(json_data)
def compare_function_metrics(udb_before, udb_after):
    # Open both databases.
    dbBefore = understand.open(udb_before)
    dbAfter = understand.open(udb_after)
    # Function-like entities carry the metrics we want to compare.
    entsBefore = dbBefore.ents("function,method,procedure")
    entsAfter = dbAfter.ents("function,method,procedure")
    root = etree.Element("root")
    doc = etree.SubElement(root, "doc")
    # Complexity metrics to compare: Cyclomatic, MaxNesting, and Essential.
    complexity_metrics = ["Cyclomatic", "MaxNesting", "Essential"]
    for i in entsBefore:
        for j in entsAfter:
            if i == j:
                metrics_before = i.metric(complexity_metrics)
                metrics_after = j.metric(complexity_metrics)
                for name in complexity_metrics:
                    if metrics_before[name] is None or metrics_after[name] is None:
                        continue
                    delta = metrics_after[name] - metrics_before[name]
                    if delta != 0:
                        method = etree.SubElement(doc, "method", name=str(i))
                        change = etree.SubElement(method, "change")
                        etree.SubElement(
                            change, name,
                            before=str(metrics_before[name]),
                            after=str(metrics_after[name])).text = str(delta)
                        print(i, " ", name, " Change: ", delta, sep="")
    tree = etree.ElementTree(root)
    tree.write("functionMetricsChange.xml")
    dbBefore.close()
    dbAfter.close()
def process(sha_list, projectname, udb_path1, udb_path2, number_of_projects):
    # Check out the base version.
    try:
        flag, np = git_checkout(sha_list[len(sha_list) - 1], projectname, 1, number_of_projects)
        flag, np = git_checkout(sha_list[len(sha_list) - 1], projectname, 2, number_of_projects)
        project_root1 = udb_path1
        project_root2 = udb_path2
        if not flag:
            return np
        else:
            print('Checkout successful')
        for l in range(int(number_of_commits)):
            if len(sha_list) < int(number_of_commits):
                break
            language = 'java'
            # Check out the next pair of versions using their SHAs.
            flag, np = git_checkout(sha_list[len(sha_list) - l - 2], projectname, 1, number_of_projects)
            flag, np = git_checkout(sha_list[len(sha_list) - l - 3], projectname, 2, number_of_projects)
            # Create and analyze the UDBs for the new code versions.
            create_udb(udb_path1 + r'\1', language, project_root1)
            create_udb(udb_path2 + r'\2', language, project_root2)
            path1 = udb_path1 + r'\1.udb'
            path2 = udb_path2 + r'\2.udb'
            try:
                db1 = understand.open(path1)
                db2 = understand.open(path2)
            except understand.UnderstandError:
                logging.fatal('udb open failed')
                raise
            # Run the Understand analysis.
            execute(db1, db2, projectname, '1')
            db1.close()
            db2.close()
    except Exception:
        print('Exception occurred during processing')
        return 0
    return np
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    sys.path.append(arguments["--dllDir"])  # add the dir with the DLL to interop with understand
    print("\r\n====== srcdiffplot by Marcio Marchini: [email protected] ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64/python or equivalent")
        sys.exit(-1)
    try:
        db_before = understand.open(arguments["--before"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    try:
        db_after = understand.open(arguments["--after"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("Processing %s and %s" % (db_before.name(), db_after.name()))
    all_metric_names = []
    all_metric_values = []
    metric_names, metric_diffs = plot_diff_file_metrics(db_before, db_after, arguments)
    all_metric_names.extend(["File:" + name for name in metric_names])
    all_metric_values.extend(metric_diffs)
    metric_names, metric_diffs = plot_diff_class_metrics(db_before, db_after, arguments)
    all_metric_names.extend(["Class:" + name for name in metric_names])
    all_metric_values.extend(metric_diffs)
    metric_names, metric_diffs = plot_diff_routine_metrics(db_before, db_after, arguments)
    all_metric_names.extend(["Routine:" + name for name in metric_names])
    all_metric_values.extend(metric_diffs)
    file_name = os.path.split(db_before.name())[-1] + "-" + os.path.split(db_after.name())[-1] + "-kiviat.png"
    # save_kiviat(all_metric_names, all_metric_values, file_name, "Sum of deltas of all metrics")
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db_before.close()
    db_after.close()
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    insert_understand_in_path(arguments["--dllDir"])
    print("\r\n====== srchistplot @ https://github.com/sglebs/srccheck ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64 or equivalent")
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("Processing %s" % db.name())
    plot_hist_file_metrics(db, arguments)
    plot_hist_class_metrics(db, arguments)
    plot_hist_routine_metrics(db, arguments)
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db.close()
def code_checkout(repo_url, sha, projectname):
    for i in range(len(sha)):
        # Clone the repository.
        repo_dir = clone_repo(repo_url, projectname, i)
        print('\nCode checked out at path: ' + repo_dir)
        # Check out the requested revision.
        flag = git_checkout(sha[i], projectname, i)
        udb_path = repo_dir + '\\' + projectname
        file_path = project_path + projectname + '\\' + str(i)
        create_udb(udb_path, language, repo_dir)
        udb_path = udb_path + '.udb'
        try:
            db = understand.open(udb_path)
        except understand.UnderstandError:
            logging.fatal('udb open failed')
            raise
        # Run the Understand analysis and generate the reports.
        execute(projectname, repo_dir, udb_path, dependencies_type)
        generate_reports(projectname, repo_dir, udb_path)
        db.close()
def get_method_calls(udb_path, scope_class_name, new_name):
    """Finds all of the refactored method calls in the database file
    and returns all of the correct references.

    Args:
        udb_path (str): Path to the database file
        scope_class_name (str): Name of the class in which the refactoring has to be done
        new_name (str): The new name of the refactored method

    Returns:
        list: References to calls of the refactored method
    """
    if not os.path.exists(path=udb_path):
        raise ValueError("Database file does not exist!")
    db = und.open(udb_path)
    method_scope = scope_class_name + "." + new_name
    references = []
    # Find all method calls that resolve to the refactored method.
    for ent in sorted(db.ents(), key=lambda ent: ent.name()):
        for ref in ent.refs(refkindstring="Call"):
            if str(ref.ent()) == method_scope:
                references.append({
                    "scope": str(ref.scope()),
                    "file_name": str(ref.file()),
                    "file_path": str(ref.file().longname()),
                    "line": ref.line(),
                    "column": ref.column()
                })
    db.close()
    return references
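# A minimal usage sketch for get_method_calls; the database path, class name,
# and new method name below are hypothetical placeholders:
for call in get_method_calls("/tmp/project.udb", "Website", "renamedMethod"):
    print("%s:%s calls the refactored method" % (call["file_path"], call["line"]))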
def compute_metrics_by_class_list(cls, project_db_path, n_jobs):
    """Compute the primary metrics for every class in the project database,
    sequentially or in parallel, and return them as a DataFrame."""
    db = und.open(project_db_path)
    class_list = UnderstandUtility.get_project_classes_longnames_java(db=db)
    db.close()
    if n_jobs == 0:
        # Sequential computation.
        res = [do(class_entity_long_name, project_db_path)
               for class_entity_long_name in class_list]
    else:
        # Parallel computation.
        res = Parallel(n_jobs=n_jobs)(
            delayed(do)(class_entity_long_name, project_db_path)
            for class_entity_long_name in class_list)
    res = list(filter(None, res))
    columns = ['Class']
    columns.extend(TestabilityMetrics.get_all_primary_metrics_names())
    df = pd.DataFrame(data=res, columns=columns)
    return df
def main(udb_path, source_class, field_name):
    # Sample arguments used during development:
    #   udb_path = "/home/ali/Desktop/code/TestProject/TestProject.udb"
    #   source_class = "Website"
    #   field_name = "HELLO_FROM_STUDENT_WEBSITE"
    print("Make Field Static")
    main_file = None
    db = und.open(udb_path)
    for cls in db.ents("class"):
        if cls.simplename() == source_class:
            main_file = cls.parent().longname(True)
            if not os.path.isfile(main_file):
                continue
    if main_file is None:
        db.close()
        return
    stream = FileStream(main_file, encoding='utf8')
    lexer = JavaLexer(stream)
    token_stream = CommonTokenStream(lexer)
    parser = JavaParserLabeled(token_stream)
    parser.getTokenStream()
    parse_tree = parser.compilationUnit()
    my_listener = MakeFieldStaticRefactoringListener(
        common_token_stream=token_stream,
        source_class=source_class,
        field_name=field_name)
    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)
    with open(main_file, mode='w', newline='') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())
    db.close()
def generate_call_graph(git, commit_hash, repo_path):
    """
    Generates a static call graph.

    :param git: Reference to the git api access.
    :param commit_hash: The hash of the commit to generate a call graph for.
    :type commit_hash: str
    :param repo_path: The path of the repository on the file system.
    :type repo_path: str
    :returns: The call graph at this commit. This is represented as a dictionary
        of methods in the full canonical form mapped to a list of methods called
        by that method in the same format.
    :rtype: dict[str, list[str]]
    """
    call_graph = {}
    udb_path = _generate_understand_db(git, commit_hash, repo_path)
    db = understand.open(udb_path)
    # Generate a mapping of method names in the form CLASS.METHOD_NAME
    # to a list of methods that each one calls.
    for fn in db.ents('function, method, procedure'):
        function_name = fn.longname()
        call_graph[function_name] = [
            called_fn.ent().longname() for called_fn in fn.refs('Java Call')
        ]
    db.close()
    # Clean up the temp file when done with it.
    os.remove(udb_path)
    return call_graph
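# A minimal usage sketch for generate_call_graph; the git handle, commit hash,
# and repository path below are hypothetical placeholders:
call_graph = generate_call_graph(git_api, "a1b2c3d", "/tmp/my-repo")
for caller, callees in sorted(call_graph.items()):
    print(caller, "->", ", ".join(callees))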
def main(udb_path, source_class, field_name, *args, **kwargs):
    main_file = None
    db = und.open(udb_path)
    for cls in db.ents("class"):
        if cls.simplename() == source_class:
            main_file = cls.parent().longname(True)
            if not os.path.isfile(main_file):
                continue
    if main_file is None:
        db.close()
        return
    stream = FileStream(main_file, encoding='utf8')
    lexer = JavaLexer(stream)
    token_stream = CommonTokenStream(lexer)
    parser = JavaParserLabeled(token_stream)
    parser.getTokenStream()
    parse_tree = parser.compilationUnit()
    my_listener = MakeFieldNonStaticRefactoringListener(
        common_token_stream=token_stream,
        source_class=source_class,
        field_name=field_name)
    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)
    with open(main_file, mode='w', newline='') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())
    db.close()
def main():
    udb_path = "/home/ali/Desktop/code/TestProject/TestProject.udb"
    source_class = "App"
    method_name = "testMethod"
    # Initialize with Understand and locate the file defining the class.
    main_file = ""
    db = und.open(udb_path)
    for cls in db.ents("class"):
        if cls.simplename() == source_class:
            main_file = cls.parent().longname()
    db.close()
    stream = FileStream(main_file, encoding='utf8')
    lexer = JavaLexer(stream)
    token_stream = CommonTokenStream(lexer)
    parser = JavaParserLabeled(token_stream)
    parser.getTokenStream()
    parse_tree = parser.compilationUnit()
    my_listener = IncreaseMethodVisibilityRefactoringListener(
        common_token_stream=token_stream,
        source_class=source_class,
        method_name=method_name)
    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)
    with open(main_file, mode='w', newline='') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())
def init_move_field(self):
    """
    Finds a field and a target class/package to move it to.

    Returns:
        tuple: Refactoring main method, its parameters (including
        target_class: str and target_package: str), and its human-readable name.
    """
    _db = und.open(self.udb_path)
    refactoring_main = move_field.main
    params = {"udb_path": str(Path(self.udb_path))}
    random_field = random.choice(self._variables)
    params.update(random_field)
    classes = _db.ents("Class ~Unknown ~Anonymous ~TypeVariable ~Private ~Static")
    random_class = random.choice(classes).longname().split(".")
    target_package = None
    if len(random_class) == 1:
        target_class = random_class[0]
    elif len(random_class) > 1:
        target_package = '.'.join(random_class[:-1])
        target_class = random_class[-1]
    else:
        _db.close()
        return self.init_move_field()
    params.update({
        "target_class": target_class,
        "target_package": target_package
    })
    _db.close()
    return refactoring_main, params, 'Move Field'
def create_class_package_dict(self):
    classes = []
    classes.extend(self.mdg_df["From Class"].values)
    classes.extend(self.mdg_df["To Class"].values)
    classes = set(classes)
    db = understand.open(self.project_db_path)
    for class_longname in classes:
        class_longname2 = class_longname.replace('$', '.')
        entities = db.lookup(re.compile(class_longname2 + r'$'))
        if entities is None or len(entities) == 0:
            # Nested classes: drop the rows that reference them.
            self.mdg_df = self.mdg_df[~self.mdg_df["From Class"].str.contains(class_longname)]
            self.mdg_df = self.mdg_df[~self.mdg_df["To Class"].str.contains(class_longname)]
        else:
            class_entity = entities[0]
            # Walk up the parents until a containing Java package is found.
            package_list = class_entity.ents('Containin', 'Java Package')
            while not package_list and class_entity.parent() is not None:
                package_list = class_entity.parent().ents('Containin', 'Java Package')
                class_entity = class_entity.parent()
            if len(package_list) < 1:
                self.class_package_dict.update({class_longname: 'default'})
            else:
                self.class_package_dict.update({class_longname: package_list[0].longname()})
    db.close()
def main(udb_path, source_package, source_class, source_field, *args, **kwargs):
    db = und.open(udb_path)
    field_ent = db.lookup(f"{source_package}.{source_class}.{source_field}", "Variable")
    if len(field_ent) == 0:
        logger.error("Invalid inputs.")
        db.close()
        return
    field_ent = field_ent[0]
    if field_ent.simplename() != source_field:
        logger.error("Invalid entity.")
        db.close()
        return
    if not field_ent.kind().check("Private"):
        logger.error("Field is not private.")
        db.close()
        return
    # Walk up to the file entity that contains the field.
    parent = field_ent.parent()
    while parent.parent() is not None:
        parent = parent.parent()
    main_file = parent.longname()
    parse_and_walk(file_path=main_file,
                   listener_class=IncreaseFieldVisibilityListener,
                   has_write=True,
                   source_class=source_class,
                   source_field=source_field)
    db.close()
def find_push_down_field_candidates(self):
    _db = und.open(self.udb_path)
    candidates = []
    class_entities = _db.ents("Class ~Unknown ~Anonymous ~TypeVariable ~Private ~Static")
    for ent in class_entities:
        params = {
            "source_class": "",
            "source_package": "",
            "field_name": "",
            "target_classes": []
        }
        field_names = []
        for ref in ent.refs("ExtendBy ~Implicit"):
            params["source_class"] = ent.simplename()
            params["source_package"] = get_package_from_class(ent.longname())
            # Always record the first subclass; include each additional
            # subclass with probability 1/2.
            if len(params["target_classes"]) >= 1:
                rnd = random.randint(0, 1)
                if rnd == 0:
                    params["target_classes"].append(ref.ent().simplename())
            else:
                params["target_classes"].append(ref.ent().simplename())
        for ref in ent.refs("define", "variable"):
            field_names.append(ref.ent().simplename())
        if field_names:
            params["field_name"] = random.choice(field_names)
        else:
            continue
        if params["source_class"] != "":
            candidates.append(params)
    _db.close()
    return candidates
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    sys.path.append(arguments["--dllDir"])  # add the dir with the DLL to interop with understand
    print("\r\n====== srchistplot by Marcio Marchini: [email protected] ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64/python or equivalent")
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("Processing %s" % db.name())
    plot_hist_file_metrics(db, arguments)
    plot_hist_class_metrics(db, arguments)
    plot_hist_routine_metrics(db, arguments)
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db.close()
def generate_metrics(self, folder_path, languages="web"):
    # Create, populate, and analyze an Understand database with the und CLI.
    subprocess.check_output(
        "und create -languages " + str(languages) + " " + str(folder_path), shell=True)
    subprocess.check_output(
        "und add -db " + str(folder_path) + ".und " + str(folder_path), shell=True)
    subprocess.check_output(
        "und analyze -db " + str(folder_path) + ".und", shell=True)
    with os.add_dll_directory("C:/Program Files/SciTools/bin/pc-win64"):
        import understand
        db = understand.open(str(folder_path) + ".und")
        metrics = db.metric(db.metrics())
        db.close()  # close before deleting the database directory
    shutil.rmtree(str(folder_path) + ".und")
    version_name = os.path.basename(folder_path)
    metrics["version"] = version_name
    return metrics
def main(udb_path, source_package, source_class, source_method, *args, **kwargs):
    db = und.open(udb_path)
    method_ent = db.lookup(f"{source_package}.{source_class}.{source_method}", "Method")
    if len(method_ent) == 0:
        logger.error("Invalid inputs.")
        db.close()
        return
    method_ent = method_ent[0]
    if method_ent.simplename() != source_method:
        logger.error("Invalid entity.")
        db.close()
        return
    if not method_ent.kind().check("Public"):
        logger.error("Method is not public.")
        db.close()
        return
    # The method can only become private if every caller lives in the same class.
    for ent in method_ent.ents("CallBy"):
        if f"{source_package}.{source_class}" not in ent.longname():
            logger.error("Method cannot be set to private.")
            db.close()
            return
    # Walk up to the file entity that contains the method.
    parent = method_ent.parent()
    while parent.parent() is not None:
        parent = parent.parent()
    main_file = parent.longname()
    parse_and_walk(file_path=main_file,
                   listener_class=DecreaseMethodVisibilityListener,
                   has_write=True,
                   source_class=source_class,
                   source_method=source_method)
    db.close()
def getUnderstandReport(udb_path, report_dir_path):
    # Set plotly credentials (left blank here on purpose).
    plotly.tools.set_credentials_file(username='', api_key='')
    # Open the udb file.
    db = understand.open(udb_path)
    images = []
    # Draw an internal-dependency graph for every top-level architecture child.
    for a in db.root_archs():
        if len(a.children()) > 0:
            for child in a.children():
                child.draw('Internal Dependencies',
                           report_dir_path + '/dependency_' + child.name() + '.png')
                images.append('dependency_' + child.name() + '.png')
    metric_list = ('CountLineBlank', 'CountLineCode', 'CountLineCodeDecl',
                   'CountLineCodeExe', 'CountLineComment')
    met = db.metric(metric_list)
    trace = go.Pie(labels=list(met.keys()), values=list(met.values()))
    plot = [trace]
    layout = go.Layout(title='Code Lines', width=800, height=640)
    fig = go.Figure(data=plot, layout=layout)
    py.image.save_as(fig, filename=report_dir_path + '/code_lines.png')
    db.close()
    # Build the HTML report.
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        with tag('head'):
            with tag('style'):
                text('table, th, td {border: 1px solid black; '
                     'border-collapse: collapse; padding:10px; white-space: nowrap;}')
        with tag('body'):
            with tag('h1'):
                text('Code Breakdown')
            with tag('table'):
                for key, value in met.items():
                    with tag('tr'):
                        with tag('td'):
                            text(key)
                        with tag('td'):
                            text(value)
            doc.stag('img', src=report_dir_path + '/code_lines.png')
            with tag('h1'):
                text('Dependency Graphs')
            for image in images:
                doc.stag('img', src=report_dir_path + "/" + image)
    report = open(report_dir_path + "/report.html", "w")
    report.write(doc.getvalue())
    report.close()

# getUnderstandReport(udbPath, abspathname + '/' + repo_name)
# The yield/finally pattern here is the canonical contextmanager body; the
# decorator (an assumption, since the original snippet omits it) makes the
# generator usable in a with-statement.
@contextlib.contextmanager
def _open_udb(path):
    udb = None
    try:
        udb = understand.open(path)
        yield udb
    finally:
        if udb is not None:
            udb.close()
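# A minimal usage sketch for _open_udb (the .udb path is a hypothetical
# placeholder); the database is closed even if the body raises:
with _open_udb("/tmp/project.udb") as udb:
    print("entities:", len(udb.ents()))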
def build_udb(self):
    # Create, populate, and analyze the database with the und CLI.
    for cmd in [
        f'und create -db {self.tempdir}/the.udb -languages {self.language} > /dev/null',
        f'und add -db {self.tempdir}/the.udb {self.src_folder} > /dev/null',
        f'und analyze -db {self.tempdir}/the.udb > /dev/null',
    ]:
        os.system(cmd)
    return und.open(os.path.join(self.tempdir, 'the.udb'))
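# A hedged alternative sketch (the method name build_udb_checked is
# hypothetical): subprocess.run(..., check=True) raises on a non-zero exit
# instead of silently continuing the way os.system does, with the same und
# commands and paths as above. Drop-in method for the same class:
import subprocess

def build_udb_checked(self):
    for cmd in [
        ['und', 'create', '-db', f'{self.tempdir}/the.udb', '-languages', self.language],
        ['und', 'add', '-db', f'{self.tempdir}/the.udb', self.src_folder],
        ['und', 'analyze', '-db', f'{self.tempdir}/the.udb'],
    ]:
        # check=True raises CalledProcessError if und reports a failure.
        subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL)
    return und.open(os.path.join(self.tempdir, 'the.udb'))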
def convert(self, udb_file_path, ta_file_path):
    self.db = understand.open(udb_file_path)
    self.ta_file = self.open_file_for_write(ta_file_path)
    self.write_common_part()
    self.write_instance()
    self.write_contain()
    self.write_clinks()
    self.ta_file.close()
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    sys.path.append(arguments["--dllDir"])  # add the dir with the DLL to interop with understand
    print("\r\n====== srcdiffplot by Marcio Marchini: [email protected] ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64/python or equivalent")
        sys.exit(-1)
    try:
        db_before = understand.open(arguments["--before"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    try:
        db_after = understand.open(arguments["--after"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("Processing %s and %s" % (db_before.name(), db_after.name()))
    all_metric_names = []
    all_metric_values = []
    all_thresholds = []
    for plot_lambda, scope in [(plot_diff_file_metrics, "File"),
                               (plot_diff_class_metrics, "Class"),
                               (plot_diff_routine_metrics, "Routine")]:
        data_by_metric_name = plot_lambda(db_before, db_after, arguments)
        for metric_name in sorted(data_by_metric_name.keys()):
            all_metric_names.append("%s\n%s" % (scope, metric_name))
            all_metric_values.append(data_by_metric_name[metric_name]["sum_after"])
            all_thresholds.append(data_by_metric_name[metric_name]["sum_before"])
    file_name = os.path.split(db_before.name())[-1] + "-" + os.path.split(db_after.name())[-1] + "-diff-kiviat.png"
    save_kiviat_with_values_and_thresholds(all_metric_names, all_metric_values, all_thresholds,
                                           file_name, "Sum of Metrics (Changed Elements)",
                                           thresholdslabel="before", valueslabel="after")
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db_before.close()
    db_after.close()
def do_refactor(self):
    db = und.open(self.udb_path)
    self.get_source_class_map(db=db)
    listener = ExtractClassRefactoringListener(
        common_token_stream=self.token_stream,
        new_class=self.new_class,
        source_class=self.source_class,
        moved_fields=self.moved_fields,
        moved_methods=self.moved_methods,
        method_map=self.method_usage_map)
    self.object_name = listener.object_name
    self.walker.walk(listener=listener, t=self.tree)
    # Find usages of the moved fields outside the refactored file.
    field_usages = []
    for field in self.moved_fields:
        for ent in db.lookup(f"{self.source_class}.{field}"):
            for ref in ent.refs("useBy, setBy, modifyBy"):
                if Path(ref.file().longname()) == Path(self.file_path):
                    continue
                field_usage = {
                    'field_name': field,
                    'file_path': ref.file().longname()
                }
                if field_usage not in field_usages:
                    field_usages.append(field_usage)
    # Re-parse the rewritten code and propagate the new class.
    stream = InputStream(listener.code)
    lexer = JavaLexer(stream)
    token_stream = CommonTokenStream(lexer)
    parser = JavaParserLabeled(token_stream)
    parser.getTokenStream()
    parse_tree = parser.compilationUnit()
    my_listener = NewClassPropagation(common_token_stream=token_stream,
                                      method_map=self.method_usage_map,
                                      source_class=self.source_class,
                                      moved_fields=self.moved_fields)
    walker = ParseTreeWalker()
    walker.walk(t=parse_tree, listener=my_listener)
    # Write the changes.
    with open(self.file_path, 'w') as f:
        f.write(listener.token_stream_rewriter.getDefaultText())
    with open(self.new_file_path, 'w') as f:
        f.write(my_listener.token_stream_rewriter.getDefaultText())
    # Propagate field usages and reformat both files.
    self.propagate_fields(field_usages)
    self.reformat(self.file_path)
    self.reformat(self.new_file_path)
    db.close()
def main(udb_path, source_package, source_class, method_name, target_classes: list, *args, **kwargs):
    db = und.open(udb_path)
    source_class_ents = db.lookup(f"{source_package}.{source_class}", "Class")
    target_class_ents = []
    source_class_ent = None
    if len(source_class_ents) == 0:
        logger.error(f"Cannot find source class: {source_class}")
        db.close()
        return
    for ent in source_class_ents:
        if ent.simplename() == source_class:
            source_class_ent = ent
            break
    if source_class_ent is None:
        logger.error(f"Cannot find source class: {source_class}")
        db.close()
        return
    method_ent = db.lookup(f"{source_package}.{source_class}.{method_name}", "Method")
    if len(method_ent) == 0:
        logger.error(f"Cannot find method to push down: {method_name}")
        db.close()
        return
    method_ent = method_ent[0]
    for ref in source_class_ent.refs("extendBy"):
        if ref.ent().simplename() not in target_classes:
            logger.error("Target classes are not children classes")
            db.close()
            return
        target_class_ents.append(ref.ent())
    for ref in method_ent.refs("callBy"):
        if ref.file().simplename().split(".")[0] in target_classes:
            continue
        logger.error("Method has dependencies.")
        db.close()
        return
    # Remove the method from the source class.
    listener = parse_and_walk(file_path=source_class_ent.parent().longname(),
                              listener_class=CutMethodListener,
                              has_write=True,
                              source_class=source_class,
                              method_name=method_name,
                              debug=False)
    # Insert the method into the child classes.
    for target_class in target_class_ents:
        parse_and_walk(file_path=target_class.parent().longname(),
                       listener_class=PasteMethodListener,
                       has_write=True,
                       source_class=target_class.simplename(),
                       method_content=listener.method_content,
                       import_statements=listener.import_statements,
                       debug=False)
    db.close()
def test_udb(self):
    # Create Understand databases for the newer and older versions.
    os.system('und create -db new1.udb -languages java')
    os.system('und create -db old1.udb -languages java')
    # Add/update the source trees.
    os.system('und -db new1.udb add ' + os.getcwd() + '/new/java')
    os.system('und -db old1.udb add ' + os.getcwd() + '/old/java')
    try:
        # Analyze the databases to populate them with the added files.
        os.system('und -quiet analyze new1.udb')
        os.system('und -quiet analyze old1.udb')
    except Exception:
        print("Cannot analyze data!")
    old1db = understand.open(os.getcwd() + '/old1.udb')  # open the udb objects
    new1db = understand.open(os.getcwd() + '/new1.udb')
    self.assertIsNotNone(new1db)
    if i < dictLength - 1:
        if [y for y in functionsLinksDictioanry[ls[0]]
                if y in functionsLinksDictioanry[ls[i + 1]]]:
            print(ls[0])
            print(ls[i + 1])
            counter = counter + 1
    print(temp)
    print("LCOM:-", len(functionsLinksDictioanry) - counter)


if __name__ == '__main__':
    # Database path is defined in the config file.
    ConfigAttributes = fileinput.input()
    DB_ROOT_DIRECTORY = ConfigAttributes[0].split(' = ')[1]
    # Open the database.
    print("Opening database..\n")
    db = understand.open(DB_ROOT_DIRECTORY)
    # print("Calculating the CBO metric..")
    startCalcTime = datetime.datetime.now()
    srcFile = open("CBO.txt", 'w')
    classesCBO = CalculateCBO()
    srcFile.close()
    endCalcTime = datetime.datetime.now()
    print("Total time elapsed for CBO calculation = ", endCalcTime - startCalcTime)
    print("Calculating the LCOM metric..")
    startCalcTime = datetime.datetime.now()
    srcFile = open("LCOM.txt", 'w')
    CalculateLCOM()
    srcFile.close()
import understand
import sys


def projectMetrics(db):
    # Fetch every project-level metric, then print the three we track.
    metrics = db.metric(db.metrics())
    print("SumCyclomatic,CountLine,CountLineCode")
    print(metrics['SumCyclomatic'], ",", metrics['CountLine'], ",", metrics['CountLineCode'])


if __name__ == '__main__':
    db = understand.open(sys.argv[1])
    projectMetrics(db)
    db.close()
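# Usage sketch from a shell (script and database names are hypothetical):
#   python project_metrics.py sample.udb
# To dump every available project metric instead of just the three columns,
# one could iterate the full dictionary:
#   for name, value in sorted(db.metric(db.metrics()).items()):
#       print(name, "=", value)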
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    insert_understand_in_path(arguments["--dllDir"])
    print("\r\n====== srcscatterplot @ https://github.com/sglebs/srccheck ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64 or equivalent")
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("Processing %s" % db.name())
    end_time = datetime.datetime.now()
    config = load_config(arguments["--config"])
    if not isinstance(config, dict):
        print("Malformed config value.")
        sys.exit(1)
    query_by_scope_name = {"file": arguments["--fileQuery"],
                           "class": arguments["--classQuery"],
                           "routine": arguments["--routineQuery"]}
    regex_by_scope_name = {"file": arguments["--regexIgnoreFiles"],
                           "class": arguments["--regexIgnoreClasses"],
                           "routine": arguments["--regexIgnoreRoutines"]}
    for scope_name, scope_configs in config.items():
        if scope_name.lower() not in query_by_scope_name:
            print("WARNING/SKIPPING: Unsupported scope %s" % scope_name)
            continue
        if not isinstance(scope_configs, list):
            print("WARNING/SKIPPING: Malformed configs for scope %s" % scope_name)
            continue
        for scope_config in scope_configs:
            if not isinstance(scope_config, dict):
                print("WARNING/SKIPPING: Malformed config for scope %s" % scope_name)
                continue
            ok = scatter_plot(db, arguments,
                              query_by_scope_name[scope_name.lower()],
                              regex_by_scope_name[scope_name.lower()],
                              scope_name,
                              scope_config.get("xMetric", "CountLineCode"),
                              scope_config.get("yMetric", "AvgCyclomaticModified"),
                              scope_config.get("ballMetric", "MaxNesting"),
                              float(scope_config.get("ballSizeMin", 40)),
                              float(scope_config.get("ballSizeMax", 4000)),
                              float(scope_config.get("ballSizeRate", 10)),
                              x_metric_min_value=float(scope_config.get("xMetricMinValue", 0.0)),
                              y_metric_min_value=float(scope_config.get("yMetricMinValue", 0.0)),
                              ball_metric_min_value=float(scope_config.get("ballMetricMinValue", 0.0)))
            if not ok:
                print("WARNING/SKIPPING: Could not create plot for scope %s with config %s" % (scope_name, scope_config))
                continue
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db.close()
if __name__ == '__main__':
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version='Source Code Checker')
    sys.path.append(arguments["--dllDir"])  # add the dir with the DLL to interop with understand
    print("\r\n====== srccheck by Marcio Marchini: [email protected] ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win32/python or equivalent")
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    print("\r\n====== PRJ Metrics (%s) ==========" % db.name())
    print_prj_metrics(db, arguments)
    print("")
    print("\r\n====== Prj Metrics that failed the filters ===========")
    [total_violation_count, prj_tracked_metrics] = process_prj_metrics(db, arguments)
    print("")
    print("\r\n====== File Metrics that failed the filters ===========")
    [violation_count, file_tracked_metrics] = process_file_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    print("")
    print("\r\n====== Class Metrics that failed the filters ==========")
    [violation_count, class_tracked_metrics] = process_class_metrics(db, arguments)
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    insert_understand_in_path(arguments["--dllDir"])
    print("\r\n====== srccheck @ https://github.com/sglebs/srccheck ==========")
    print(arguments)
    try:
        import understand
    except ImportError:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64 or equivalent")
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)
    adaptive = arguments.get("--adaptive", False)
    print("\r\n====== Project Metrics (%s) (%s) ==========" % (db.name(), db.language()[0]))
    prj_metrics = project_metrics(db, arguments)
    print_prj_metrics(prj_metrics)
    print("")
    print("\r\n====== Project Metrics that failed the filters ===========")
    [total_violation_count, prj_tracked_metrics, prj_max_metrics] = process_prj_metrics(arguments, prj_metrics)
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxPrjMetrics", False), prj_tracked_metrics)
    print("")
    print("\r\n====== File Metrics that failed the filters ===========")
    [violation_count, file_tracked_metrics, file_max_metrics] = process_file_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxFileMetrics"), file_tracked_metrics)
    print("")
    print("\r\n====== Class Metrics that failed the filters ==========")
    [violation_count, class_tracked_metrics, class_max_metrics] = process_class_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxClassMetrics"), class_tracked_metrics)
    print("")
    print("\r\n====== Routine Metrics that failed the filters ==========")
    [violation_count, routine_tracked_metrics, routine_max_metrics] = process_routine_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxRoutineMetrics"), routine_tracked_metrics)
    print("")
    print("\r\n====== Publishing selected metrics ===========")
    tracked_metrics = {}
    append_dict_with_key_prefix(tracked_metrics, prj_tracked_metrics, "Prj")
    append_dict_with_key_prefix(tracked_metrics, file_tracked_metrics, "File")
    append_dict_with_key_prefix(tracked_metrics, class_tracked_metrics, "Class")
    append_dict_with_key_prefix(tracked_metrics, routine_tracked_metrics, "Routine")
    max_metrics = {}
    append_dict_with_key_prefix(max_metrics, prj_max_metrics, "Prj")
    append_dict_with_key_prefix(max_metrics, file_max_metrics, "File")
    append_dict_with_key_prefix(max_metrics, class_max_metrics, "Class")
    append_dict_with_key_prefix(max_metrics, routine_max_metrics, "Routine")
    output_dir = arguments["--outputDir"]
    file_prefix = "%s%s%s" % (output_dir, os.sep, os.path.split(db.name())[-1])
    file_name = save_kiviat_of_metrics(tracked_metrics, max_metrics, arguments, file_prefix)
    print("Kiviat saved to %s" % file_name)
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep, arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, tracked_metrics)
    if csv_ok:
        print("+++ Metrics saved to %s" % absolute_csv_path)
    else:
        print("\n*** Problems creating CSV file %s" % absolute_csv_path)
    post_metrics_to_sonar(arguments, tracked_metrics)
    print("")
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("Violations: %i" % total_violation_count)
    print("--------------------------------------------------")
    db.close()
    sys.exit(total_violation_count)
def open(self, path):
    if self._db:
        self._db.close()
    self._dbPath = path
    self._db = understand.open(path)
    self.onOpen()
def reopen(self):
    if self._dbPath:
        self._db = understand.open(self._dbPath)
        self.onOpen()
            n_en = n_lex.ent()
            if n_en.simplename() == "lock":
                return True
            else:
                return False
        line -= 1
    return False


# Main driver: currently the line numbers and keywords are supplied manually.
if __name__ == '__main__':
    db = understand.open(path)
    lines = [2535, 2543]
    keyword = ["server", "bl"]
    lex = get_lexer(cur_file)
    c = 0
    # Check each line where an error occurs.
    for line in lines:
        method = get_method(lex, line, keyword[c])
        line -= 1
        # Run our check on the enclosing method.
        res = check_fun(lex, line, method)
        if res is True:
            print("false positive")
import understand

# projects = ["tomcat6.0.0", "tomcat6.0.43", "tomcat7.0.0", "tomcat7.0.61", "tomcat8.0.0", "tomcat8.0.21"]
projects = ['tomcat8.0.22', 'tomcat8.0.23']
for project in projects:
    db = understand.open("..//" + project + ".udb")
    package = {}
    edges = []
    file = open(".//tomcat_history//" + project + "//tomcat_pack.txt", "w+")
    p = sorted(db.ents("package"), key=lambda ent: ent.name())
    # Write per-package and per-file metrics.
    for ent in p:
        l = ent.refs("contain")
        m = ent.metric(['AvgCyclomatic', 'CountLineCode'])
        file.write("%s\t%d\t%s\t%s\n" % (ent.name(), len(l), m['CountLineCode'], m['AvgCyclomatic']))
        for t in l:
            package[t.file().name()] = ent
            x = t.file().metric(['CountLineCode', 'AvgCyclomatic'])
            file.write("\t%s\t%s\t%s\n" % (t.file().name(), x['CountLineCode'], x['AvgCyclomatic']))
    file.close()
    # Collect package-level dependency edges (fixed typo: was refs("contin")).
    for ent in p:
        for t in ent.refs("contain"):
            for x in t.file().depends().keys():
                if x.name() in package:
                    if [ent, package[x.name()]] not in edges:
                        edges.append([ent, package[x.name()]])
    file = open(".//tomcat_history//" + project + "//tomcat_pack_depends.txt", "w+")
    for x, y in edges:
        file.write("%s %s\n" % (x, y))
    file.close()
    db.close()
import understand
# import matplotlib.pyplot as plt
# import networkx as nx

# projects = ['6.0.0', '6.0.43', '7.0.0', '7.0.61', '8.0.0', '8.0.21']
projects = ['8.0.22', '8.0.23']
for project in projects:
    udb = understand.open("../tomcat" + project + ".udb")
    file = open("tomcat_history//tomcat" + project + "//tomcat.txt", "w")
    filedep = open("tomcat_history//tomcat" + project + "//tomcat_depends.txt", "w")
    edges = []
    nodes = []
    # Write per-file metrics and file-level dependency edges.
    for ent in sorted(udb.ents("file"), key=lambda ent: ent.name()):
        x = ent.metric(['CountLine', 'AvgCyclomatic'])
        nodes.append(ent.name())
        file.write("%s\t%s\t%s\n" % (ent.name(), x['CountLine'], x['AvgCyclomatic']))
        for t in ent.depends().keys():
            filedep.write("%s\t%s\n" % (ent.name(), t.name()))
            edges.append([ent.name(), t.name()])
    file.close()
    filedep.close()
    udb.close()