def loop(inner=None, outer=None, innerEvent=None, book=None):
    """Walk the outer TTree event by event and compare raw payloads.

    When `inner` is given, each outer event is matched to an inner event
    via `innerEvent` and both payloads are compared; otherwise only the
    outer payload is passed to compare.compare().

    FIX: the defaults were mutable `{}` literals shared across calls —
    `book` in particular accumulates comparison state, so every call was
    silently writing into the same dict. Use None sentinels instead.
    """
    inner = {} if inner is None else inner
    outer = {} if outer is None else outer
    innerEvent = {} if innerEvent is None else innerEvent
    book = {} if book is None else book

    if inner:
        fI = r.TFile.Open(inner["fileName"])
        treeI = fI.Get(inner["treeName"])
        assert treeI, inner["treeName"]

    f = r.TFile.Open(outer["fileName"])
    tree = f.Get(outer["treeName"])
    assert tree, outer["treeName"]

    for iOuterEvent in range(nEvents(tree, outer["nEventsMax"])):
        nb = tree.GetEntry(iOuterEvent)
        if nb <= 0:
            # zero/negative byte count: entry could not be read
            continue
        raw = collectedRaw(tree=tree, specs=outer)

        if inner:
            iInnerEvent = innerEvent[iOuterEvent]
            if iInnerEvent is None:
                # no matching inner event for this outer event
                continue
            nb = treeI.GetEntry(iInnerEvent)
            if nb <= 0:
                continue
            rawInner = collectedRaw(tree=treeI, specs=inner)
            compare.compare(raw, rawInner, book=book)
        else:
            compare.compare(raw, book=book)

    f.Close()
    if inner:
        fI.Close()
def registerForCourse(student, course):
    # Try to enroll `student` in `course` at one of its two time slots.
    # NOTE: Python 2 code (print statements).
    print "properties = ", course.printProperties()
    # Hard cap of 5 courses per student.
    if student.coursesNumber == 5:
        raise Exception("No more space!")
    if student.coursesNumber < 5:
        # Slot 1 first: student must be available and seats must remain.
        if (compare.compare(course.time1, student.availability)
                and course.nSpots1 > 0):
            student.coursesNumber += 1
            student.coursesList.append(
                [course.name, course.time1[0], course.time1[1]])
            course.addStudent1(student)
        # Fall back to slot 2 under the same conditions.
        elif (compare.compare(course.time2, student.availability)
                and course.nSpots2 > 0):
            student.coursesNumber += 1
            student.coursesList.append(
                [course.name, course.time2[0], course.time2[1]])
            course.addStudent2(student)
        else:
            # Neither slot fits — nothing is changed.
            print 'else'
            pass
def search_examples():
    """Some examples of using regular expression search.

    FIX: regex patterns are now raw strings — '\\d' in a normal string is
    an invalid escape sequence (DeprecationWarning, SyntaxError in future
    Python versions).
    """
    # example text body: tRNA structures from the PDB
    text = """
2ow8 3.71 tRNA_Phe ribosome_(p-site)
1b23 2.60 tRNA_Cys Ef-Tu
1qrt 2.70 tRNA_Gln GlnRS
1qf6 2.90 tRNA_Thr ThrRS
1j1u 1.95 tRNA_Tyr TyrRS
2fk6 2.90 tRNA_Thr RNase_Z
2hgr 4.51 tRNA_Phe ribosome_(e_or_p-site)
"""
    # count frequency of the word 'tRNA'
    result = re.findall(r'tRNA', text)
    compare(len(result), 7)
    # extract all PDB codes
    result = re.findall(r'\d\w\w\w', text)
    compare(result, ['2ow8', '1b23', '1qrt', '1qf6', '1j1u', '2fk6', '2hgr'])
    # extract all aromatic amino acids
    result = re.findall(r'tRNA_Phe|tRNA_Tyr|tRNA_Trp', text)
    compare(result, ['tRNA_Phe', 'tRNA_Tyr', 'tRNA_Phe'])
    # extract elements separated by spaces (2 middle columns)
    result = re.findall(r' [^ ]+ ', text)
    compare(len(result), 14)
    # extract a whole line containing key phrase 'Ef-Tu'
    result = re.findall(r'(\d.*Ef-Tu.*)\n', text)
    compare(result, ['1b23 2.60 tRNA_Cys Ef-Tu'])
def test_invalid_input(self):
    """Malformed, negative and empty version strings must be rejected."""
    # ------- INVALID VERSION 1 --------
    format_cases = [
        ("3.3.3", "2."),              # '.' with no subsequent number
        ("2.2.2", ".2"),              # '.' before a number
        ("2.2", "sfasfdpk.asdfsa"),   # letters are not allowed
    ]
    for v1, v2 in format_cases:
        self.assertEqual(compare(v1, v2), "Wrong format")
    # Negative components get a dedicated message.
    self.assertEqual(compare("2.2", "-1321.1"),
                     "Negative value versions not accepted")
    # ------- INVALID BOTH VERSIONS --------
    # None and empty string are both invalid.
    self.assertEqual(compare(None, ""), "Invalid empty input")
def main():
    """Entry point: compare DSM/DTM rasters, aligning point clouds first
    when the environment allows it, then dump the result to a file."""
    # Read params — `opts` is a module-level dict populated elsewhere.
    reference_pc = opts['reference_pc']
    compare_pc = opts['compare_pc']
    reference_dsm = opts['reference_dsm']
    reference_dtm = opts['reference_dtm']
    compare_dsm = opts['compare_dsm']
    compare_dtm = opts['compare_dtm']
    aligned_compare_dsm = opts['aligned_compare_dsm']
    aligned_compare_dtm = opts['aligned_compare_dtm']
    epsg = opts['epsg']
    resolution = float(opts['resolution'])
    min_height = float(opts['min_height'])
    min_area = float(opts['min_area'])
    display_type = opts['display_type']
    format = opts['format']
    # string flag from the options source, not a real boolean
    can_align_and_rasterize = opts['can_align_and_rasterize'] == 'true'

    if can_align_and_rasterize:
        # Align the compare point cloud to the reference, rasterize, then
        # compare the *aligned* rasters.
        handle_if_should_align_align_and_rasterize(reference_pc, compare_pc,
                                                   reference_dsm, reference_dtm,
                                                   aligned_compare_dsm, aligned_compare_dtm)
        result_dump = compare(reference_dsm, reference_dtm,
                              aligned_compare_dsm, aligned_compare_dtm,
                              epsg, resolution, display_type, min_height, min_area)
    else:
        # No alignment possible: compare the rasters as-is.
        handle_if_shouldnt_align_and_rasterize(reference_dsm, reference_dtm,
                                               compare_dsm, compare_dtm)
        result_dump = compare(reference_dsm, reference_dtm,
                              compare_dsm, compare_dtm,
                              epsg, resolution, display_type, min_height, min_area)

    # Write the geojson as the expected format file
    write_to_file(result_dump, format)
def main():
    """Main function: parse args, load config, build the listing DB and
    compare; optionally dump results depending on `source.dump_result`.

    FIX: previously, when the config option was missing the except branch
    ran AND execution fell through to `if dumpResult == "true"`, raising
    NameError because `dumpResult` was never bound. We now return after
    the fallback dump.
    """
    parser = argparse.ArgumentParser(description="This is code sniffer.")
    parser.add_argument('--config',
                        metavar='filepath',
                        default=os.environ['HOME'] + "/.config/sniffer/config",
                        help="Path of configuration file.")
    args = parser.parse_args()
    configFile = args.config
    # check if file exists.
    if not os.path.exists(configFile):
        print("[E] File {0} does not exists".format(configFile))
        sys.exit(0)
    # else parse it.
    import ConfigParser as cfg
    config = cfg.ConfigParser()
    config.read(configFile)
    db = database.buildListingDb(config)
    compare.compare(config, db)
    try:
        dumpResult = config.get("source", "dump_result")
    except Exception:
        # Option absent: dump unconditionally, then stop.
        database.writeContent(config, db)
        database.genrateDOT(config, db)
        return
    if dumpResult == "true":
        database.writeContent(config, db)
        database.genrateDOT(config, db)
def search_examples():
    """Some examples of using regular expression search.

    FIX: regex patterns are now raw strings — '\\d' in a normal string is
    an invalid escape sequence (DeprecationWarning, SyntaxError in future
    Python versions).
    """
    # example text body: tRNA structures from the PDB
    text = """
2ow8 3.71 tRNA_Phe ribosome_(p-site)
1b23 2.60 tRNA_Cys Ef-Tu
1qrt 2.70 tRNA_Gln GlnRS
1qf6 2.90 tRNA_Thr ThrRS
1j1u 1.95 tRNA_Tyr TyrRS
2fk6 2.90 tRNA_Thr RNase_Z
2hgr 4.51 tRNA_Phe ribosome_(e_or_p-site)
"""
    # count frequency of the word 'tRNA'
    result = re.findall(r'tRNA', text)
    compare(len(result), 7)
    # extract all PDB codes
    result = re.findall(r'\d\w\w\w', text)
    compare(result, ['2ow8', '1b23', '1qrt', '1qf6', '1j1u', '2fk6', '2hgr'])
    # extract all aromatic amino acids
    result = re.findall(r'tRNA_Phe|tRNA_Tyr|tRNA_Trp', text)
    compare(result, ['tRNA_Phe', 'tRNA_Tyr', 'tRNA_Phe'])
    # extract elements separated by spaces (2 middle columns)
    result = re.findall(r' [^ ]+ ', text)
    compare(len(result), 14)
    # extract a whole line containing key phrase 'Ef-Tu'
    result = re.findall(r'(\d.*Ef-Tu.*)\n', text)
    compare(result, ['1b23 2.60 tRNA_Cys Ef-Tu'])
def test_find_max_children():
    """Stress all FindMaxChildren variants on one large random dataset."""
    data = [1, -2, 3, 5, -3, 2]
    # Same RNG call sequence as before: size first, then each value.
    extra_count = random.randint(2000, 2050)
    data.extend(random.randint(-10000, 10000) for _ in range(extra_count))
    finder = FindMaxChildren()
    compare(data, FindMaxChildren.print_method,
            finder.common, finder.improve, finder.improve2, finder.improve3)
def test_nearest():
    """Exercise FindNearestPoint on a small random set of points."""
    finder = FindNearestPoint()
    data = []
    for _ in range(random.randint(4, 6)):
        pt = Point(random.randint(0, 1000), random.randint(0, 1000))
        data.append(pt)
        print("create point at (%d, %d) " % (pt.x, pt.y))
    compare(data, FindNearestPoint.print_method, finder.normal, finder.imporve)
def main():
    # Purpose: test program — checks whether the HTML tags scraped from a
    # website match the tags previously saved in the SQL Server DB.
    # Written by: Shir Rabi, April 2020
    # Input: url. Output: comparison of live HTML elements vs expected ones.
    stored = get_data()
    live_tags, live_times = export_elements()
    compare(live_times, live_tags, stored)
def substitution_examples():
    """Some examples of using regular expression substitution.

    FIX: the digit pattern is now a raw string — '\\d' in a normal string
    is an invalid escape sequence.
    """
    # example text
    text = "7 apples, 24 bananas, 14 carrots, and oranges are fruit"
    # replace one word by another
    result = re.sub('carrots', 'pears', text)
    compare(result, "7 apples, 24 bananas, 14 pears, and oranges are fruit")
    # remove all numbers
    result = re.sub(r'\d+ ', '', text)
    compare(result, "apples, bananas, carrots, and oranges are fruit")
def substitution_examples():
    """Some examples of using regular expression substitution.

    FIX: the digit pattern is now a raw string — '\\d' in a normal string
    is an invalid escape sequence.
    """
    # example text
    text = "7 apples, 24 bananas, 14 carrots, and oranges are fruit"
    # replace one word by another
    result = re.sub('carrots', 'pears', text)
    compare(result, "7 apples, 24 bananas, 14 pears, and oranges are fruit")
    # remove all numbers
    result = re.sub(r'\d+ ', '', text)
    compare(result, "apples, bananas, carrots, and oranges are fruit")
def test_numbers(self):
    """compare() must classify version pairs as GREATER/SMALLER/EQUAL."""
    greater = [("1.1.3", "1.1.2"), ("2.113.3", "2.113.2"), ("2.5.1", "2.4.0")]
    smaller = [("2.1.1", "2.1.2"), ("2.1.1", "2.10.1"), ("3.0.1", "3.0.2")]
    equal = [("3.3.3", "3.3.3"), ("200.144", "200.144")]
    for v1, v2 in greater:
        self.assertEqual(compare(v1, v2), v1 + " is GREATER than " + v2)
    for v1, v2 in smaller:
        self.assertEqual(compare(v1, v2), v1 + " is SMALLER than " + v2)
    for v1, v2 in equal:
        self.assertEqual(compare(v1, v2), v1 + " is EQUAL to " + v2)
def wrapper(f_config, verbose=False, onlyref=False):
    """Generate a MAJA time series and compare the two processing contexts
    product by product, optionally against an external reference.

    NOTE(review): `onlyref` is accepted but never read in this body —
    confirm whether it is still needed.
    """
    timeseries = majatools.Timeseries(f_config, verbosity=verbose)
    timeseries.generate()
    for product in timeseries.common_product_list:
        # Build a Context for the same product in each of the two collections.
        context_1 = majatools.Context(timeseries.root_path + timeseries.collection_1_path + product,
                                      timeseries.type,
                                      timeseries.context_1,
                                      timeseries.scale_f_aot,
                                      timeseries.scale_f_sr,
                                      timeseries.nodata_aot)
        context_2 = majatools.Context(timeseries.root_path + timeseries.collection_2_path + product,
                                      timeseries.type,
                                      timeseries.context_2,
                                      timeseries.scale_f_aot,
                                      timeseries.scale_f_sr,
                                      timeseries.nodata_aot)
        # Optional external reference product for this acquisition.
        reference = timeseries.find_reference(product)
        if reference is not None:
            if verbose:
                print("INFO: Got %s as reference " % reference)
            reference_fullpath = timeseries.reference_collection_path + reference
        else:
            reference_fullpath = None
        try:
            compare(context_1, context_2,
                    reference=reference_fullpath,
                    reference_name=timeseries.reference_collection_name,
                    verbose=verbose,
                    subset=True,
                    ulx=timeseries.subset_ulx,
                    uly=timeseries.subset_uly,
                    lrx=timeseries.subset_lrx,
                    lry=timeseries.subset_lry,
                    report=timeseries.report,
                    plots=timeseries.plot,
                    quicklook=timeseries.quicklook)
        except IndexError as e:
            # A failing product is reported but does not abort the series.
            print(e)
            pass
def test_for_nltk( self ):
    # Attempting to use nltk. This will cause an error if the corpora is not downloaded
    # NOTE: Python 2 code (print statements); the bare `except` below also
    # swallows KeyboardInterrupt — kept as-is, flagged for review.
    try:
        # Creating a new compare object
        compare_nltk = compare()
        # Comparing using the nltk parser
        compare_nltk.compare_strings( [ "Back at my desk, I poured and killed him a rattlesnake and some more rattlesnake", "the cat and the mouse in the house is sitting, in the house, on the mat", "time is it?", "what time is it here?", "This is the cat's hat" ], False, 'nltk' )
        # If that was successful, getting information
        sentence_information = compare_nltk.get_sentence_information()
        for sentence in sentence_information:
            # Collect the parsed fields (values are built but not asserted here).
            my_pattern = "[ Pattern ] : " + sentence.pattern
            my_subject = "[ Subject ] : " + sentence.subject
            my_verb = "[ Verb ] : " + sentence.verb
            my_object = "[ Object ] : " + sentence.object[0]
            my_reliability_score = "[ Reliability Score ]: " + str( sentence.reliability_score )
    except:
        # If it didn't work, this means the dependencies are missing from the system
        # The user will be asked whether he/she wants to install the dependencies. If so, they will be installed.
        # Otherwise, the program will quit and an error will appear saying the dependencies must be installed to use that parser
        if tkMessageBox.askokcancel( "Dependency Downloader", "Would you like to download the dependencies for nltk? The nltk parser will not be able to be used until the dependencies are downloaded.\n\nThe required space is: 1 GB" ):
            nltk_downloader = gui_downloader()
            nltk_downloader.download( "NLTK Corpora", "1 GB" )
            nltk_downloader.mainloop()
        else:
            print ""
            print "Dependencies MUST be downloaded to use this parser. Either do not use this parser, or download the dependencies."
            print ""
            exit( 0 )
def check_it():
    """Return whether the PostgreSQL mirror sync timestamp is fresh
    (compared against a 2.5-day threshold by compare.compare)."""
    checkURL = "http://mirrors.zju.edu.cn/postgresql/sync_timestamp"
    with urllib.request.urlopen(checkURL) as response:
        # str(bytes) deliberately yields the "b'...'" repr; the strptime
        # format below includes that wrapper. NOTE(review): decoding the
        # bytes would be cleaner, but it would also change the string that
        # find_tmzone() receives — confirm before changing.
        plain_string = str(response.read())
    timestruct = time.strptime(plain_string, "b'%Y-%m-%d %H:%M:%S %Z\\n'")
    # Convert to an absolute timestamp using the timezone found in the text.
    timestamp = tstt.convert(timestruct, tstt.find_tmzone(plain_string))
    return compare.compare(timestamp, 3600 * 24 * 2.5)
def run_std_test(mcgdb, delay, logfile='logfile.log', wait=False, regexes='regexes.py', print_records=False):
    """Replay a recorded mcgdb session and diff the new journal against the
    original, writing the diff to `logfile`.

    Returns (compare result, human-readable pointer to the log file).

    FIX: the log file handle previously leaked if compare.compare() raised;
    it is now managed by a `with` block.
    """
    has_regexes = os.path.exists(regexes)
    if has_regexes:
        regexes = os.path.abspath(regexes)
    do_cmd('make clean')
    do_cmd('make')
    do_cmd("unxz --keep --force {record_orig_py_xz}".format(
        record_orig_py_xz=os.path.abspath('record.orig.play.xz'),
    ))
    cmd = "mcgdb_play.py {record_orig} --delay={delay} --output={record_new} --mcgdb={mcgdb} {print_records}".format(
        record_orig=os.path.abspath('record.orig.py'),
        record_new=os.path.abspath('record.new.play'),
        delay=delay,
        mcgdb=mcgdb,
        print_records='--print_records' if print_records else '',
    )
    if wait:
        cmd += ' --wait=record.orig.play'
    if has_regexes:
        cmd += ' --regexes=%s' % regexes
    # check=False: the replay may "fail" and we still want to diff journals.
    do_cmd(cmd, check=False)
    with open(logfile, 'wb') as flog:
        kwargs = {
            'journal1': 'record.orig.play',
            'journal2': 'record.new.play',
            'colorize': False,
            'output': flog,
        }
        if has_regexes:
            kwargs['regexes'] = regexes
        res = compare.compare(**kwargs)
    return res, 'See %s' % os.path.join(os.getcwd(), logfile)
def cartesian_similarity(df1, df2, keep_geom='geometry_x', **kwargs):
    """
    Build the Cartesian product of df1 and df2 and score every pair.

    Parameters
    ----------
    df1 : GeoDataFrame
    df2 : GeoDataFrame
    keep_geom : string
        Either 'geometry_x' or 'geometry_y', selecting which geometry
        column (from df1 and df2 respectively) the returned GeoDataFrame
        uses as its active geometry.

    Returns
    -------
    GeoDataFrame
        Cross join of df1 and df2 with a new `similarity_score` column.
    """
    crossed = df_crossjoin(df1, df2)

    def _score(row):
        # One similarity value per (geometry_x, geometry_y) pair.
        return compare(row['geometry_x'], row['geometry_y'], **kwargs)

    crossed['similarity_score'] = (
        crossed[['geometry_x', 'geometry_y']].apply(_score, axis=1))
    return gpd.GeoDataFrame(crossed, geometry=keep_geom)
def live_demo():
    # Interactive classification demo. NOTE: Python 2 (print statement).
    # Read in user input, clear out stop words, weight given the style of input
    [user_input, word_weights] = take_user_input()
    # if none of the input is significant at all
    if user_input is None:
        sys.exit("I am unsure what you meant. Please try again.")
    # Read in the possible classifications to the dictionary
    keywords_dictionary = read_possible_classifications("../input/text.txt")
    # Compare possibly classifications to significant user input, weighted
    # appropriately for "nots"
    scores = compare(user_input, keywords_dictionary, word_weights)
    # Calculate probabilities from the scores returned
    probabilities = find_probabilities(scores);
    # Print the probabilities, if you want to know more details
    #print(probabilities)
    print json.dumps(probabilities, indent=1)
    # Process scores based on probabilities
    process_scores(probabilities);
def compare_value(x, y, label):
    """
    Compare x and y using the label-specific compare function in compare.py.

    :return: similarity score
    """
    score = compare.compare(x, y, label)
    return score
def my_form():
    """Handle the question form: validate the email, then append the
    question to questions.json keyed by email address.

    FIX: file handles are now managed with `with` blocks — previously the
    read handle leaked when read()/close() raised, and the write handle
    leaked when write() raised.
    """
    mail = request.forms.get('ADRESS')
    quest = request.forms.get('QUEST')
    if len(mail) == 0 or len(quest) == 0:
        return "Fill all fields!"
    if not compare.compare(mail):
        return "Enter valid email"
    questions = {}
    try:
        with open("questions.json", 'r') as file:
            json_str = file.read()
        if len(json_str) != 0:
            try:
                questions = json.loads(json_str)
            except json.JSONDecodeError as e:
                # Corrupt store: start from an empty dict, as before.
                print(e)
    except FileNotFoundError as e:
        # First question ever — the file will be created below.
        print(e)
    questions[mail] = quest
    with open("questions.json", 'w') as file:
        file.write(json.dumps(questions))
    return "Thanks! The answer will be sent to the mail %s" % mail
def check_it():
    """Return whether the CentOS mirror timestamp is fresh (compared
    against a 1.5-day threshold by compare.compare)."""
    checkURL = "http://mirrors.zju.edu.cn/centos/timestamp.txt"
    with urllib.request.urlopen(checkURL) as response:
        # str(bytes) deliberately yields the "b'...'" repr; the strptime
        # format below includes that wrapper. NOTE(review): decoding the
        # bytes would be cleaner, but would change what find_tmzone()
        # receives — confirm before changing.
        plain_string = str(response.read())
    timestruct = time.strptime(plain_string, "b'%a %b %d %H:%M:%S %Z %Y\\n'")
    # Convert to an absolute timestamp using the timezone found in the text.
    timestamp = tstt.convert(timestruct, tstt.find_tmzone(plain_string))
    return compare.compare(timestamp, 3600 * 24 * 1.5)
def render(filename, width, height, bbox, quiet=False):
    """Render a Mapnik style to PNG and diff it against the reference image.
    NOTE: Python 2 code (print statements). Returns the Map object."""
    if not quiet:
        print "Rendering style \"%s\" with size %dx%d ... \x1b[1;32m✓ \x1b[0m" % (
            filename, width, height)
        print "-" * 80
    m = mapnik.Map(width, height)
    mapnik.load_map(m, os.path.join(dirname, "styles", "%s.xml" % filename), False)
    if bbox is not None:
        m.zoom_to_box(bbox)
    else:
        m.zoom_all()
    expected = os.path.join(dirname, "images", '%s-%d-reference.png' % (filename, width))
    # Render into a scratch directory so references are never overwritten.
    if not os.path.exists('/tmp/mapnik-visual-images'):
        os.makedirs('/tmp/mapnik-visual-images')
    actual = os.path.join("/tmp/mapnik-visual-images", '%s-%d-agg.png' % (filename, width))
    mapnik.render_to_file(m, actual)
    # Pixel-level diff; 0 means the render matches the reference exactly.
    diff = compare(actual, expected)
    if diff > 0:
        print "-" * 80
        print '\x1b[33mError:\x1b[0m %u different pixels' % diff
        print "-" * 80
    return m
def second_pass(filenames, fname=None, silent=False):
    """Group bib entries by year, split each group into duplicates and
    uniques, write an annotated summary file, and report statistics.

    Returns (potential duplicate sets, title diffs between them).
    """
    entries = list(btx_io.read_bib_entries(*filenames))
    if fname is None:
        base = os.path.basename(filenames[0]).replace('.txt', '.bib')
        fname = 'CITeX_annotated_' + base
    pt_dup = []
    uniques = []
    for group in group_entries(entries):
        dups, unique_inst = compare.compare(group)
        pt_dup.extend(dups)
        uniques.extend(unique_inst)
    title_diff = diff_titles(pt_dup)
    write_summary(pt_dup, title_diff, uniques, fname)
    if not silent:
        print('There are {} references in the input {}'.format(
            len(entries), 'files' if len(filenames) > 1 else 'file'))
        print(
            '{} references were found with no heuristic match of title'.format(
                len(uniques)))
        print(
            '{} sets of references have been highlighted for your attention in '
            .format(len(pt_dup)) + str(fname))
    return pt_dup, title_diff
def attempt_compare():
    """Compare posted biometric data against the registered average for the
    given email; on a sufficiently low cost, fold the new data into the
    stored average.

    FIX: `db.users.update({"data": n_avg}, {"name": text})` had the
    filter/update arguments swapped — it would match on the new average
    and replace a document with just {"name": text}. Now filters by name
    and $sets the data field (update_one, the non-deprecated API).
    """
    reg = json.loads(request.data.decode("utf-8"))
    required = ["data", "email"]
    for r in required:
        if r not in reg:
            return jsonify({"error": "invalid request"})
    data, text = reg["data"], reg["email"]
    obj = db.users.find_one({"name": text})
    if obj is None:
        return jsonify({"error": "I could not find you :("})
    register_avg = obj["data"]
    costs = compare(register_avg, data)
    if "blank" not in costs:
        # compare() returned an error payload; pass it through.
        return jsonify(costs)
    blank, held = costs["blank"], costs["held"]
    total_cost = 0.8 * blank + 0.2 * held
    print("Costs: (%f, %f, %f)" % (blank, held, total_cost))
    if total_cost < cost_threshold:
        n_avg = movavg(register_avg, data)
        db.users.update_one({"name": text}, {"$set": {"data": n_avg}})
    return jsonify(total_cost < cost_threshold)
def endcg():
    """Grab the end-of-CG screen region; click it when it matches the
    reference image, returning True on a click."""
    GI.getImg(900, 660, 1010, 750)
    score = CP.compare('./temp.png', './cgend.png')
    if score <= 0.5:
        # Not similar enough — do nothing (implicitly returns None).
        return
    print('cgicom match,end yuhun')
    MC.click(900, 660, 1010, 750)
    return True
def test02(self):
    """US31: the single-person listing for gedfile 1 must NOT match the
    expected result file (a non-empty diff is expected)."""
    indivs = main_parser.tester(self.gedfiles[1])[0]
    expected_file = self.txtfiles[1]
    diffs = compare.compare(
        us31.listsingle(indivs, print_table=False), expected_file, "US31")
    self.results += diffs
    self.assertFalse(len(diffs) == 0)
def main():
    """Entry point: ask the user for a mode (1 = enrollment, 2 = face
    recognition) and run the corresponding camera pipeline.

    FIX: on an invalid choice the old code prompted once more but threw
    the answer away and did nothing; it now loops until a valid choice
    is entered.
    """
    choix = int(
        input(
            "Choissisez 1. Pour l'enregistrement 2. Pour la reconnaissance du visage : "
        ))
    while choix != 1 and choix != 2:
        choix = int(
            input(
                "Choissisez 1. Pour l'enregistrement 2. Pour la reconnaissance du visage : "
            ))
    if choix == 1:
        print("Bienvenue dans le mode enregistrement")
        video_capture = cv2.VideoCapture(0)
        # detect a face and return the detected image
        face = detection(video_capture)
        nom = input("Nom de la personne : ")
        # save the captured face under the given name
        face = enregistrement(face, nom)
    elif choix == 2:
        print("Bienvenue dans le mode reconnaissance")
        video_capture = cv2.VideoCapture(0)
        # compare the live capture against the face database
        visage = compare(video_capture)
def test01(self):
    """US29: the deceased listing for gedfile 0 must match the expected
    result file exactly (an empty diff)."""
    indivs = main_parser.tester(self.gedfiles[0])[0]
    expected_file = self.txtfiles[0]
    diffs = compare.compare(
        us29.listdeceased(indivs, print_table=False), expected_file, "US29")
    self.results += diffs
    self.assertTrue(len(diffs) == 0)
def anonymous(nfiles, name=True, debug=True):
    """Preprocess the given files (or a single preprocessed sample) into a
    1-D feature vector and run the classifier on it.

    name  : when True, `nfiles` is a list of file names to process through
            compare(); when False it is already-processed data.
    debug : print the prediction before returning it.

    IMPROVED: every `options["k"] if "k" in options else default` was
    replaced with the equivalent `options.get("k", default)`.
    """
    # File processing
    if name:
        compared = compare(
            folder=folder_test,
            files=nfiles,
            time_res=options.get("time_res", 0),
            amp_res=options.get("amp_res", 0),
            fmin=options.get("fmin", 0),
            fmax=options.get("fmax", 0),
            nb_filters=options.get("nb_filters", 0),
            q=options.get("q", 0),
            n=options.get("n", 0),
            fcs=options.get("fcs", []),
            filters=options.get("filters", []),
            filters_fq=options.get("filters_fq", []),
            drc_tl=options.get("drc_tl", False),
            drc_th=options.get("drc_th", False),
            drc_r=options.get("drc_r", False),
            formants=options.get("formants", []),
            format=options.get("format", ".wav"),
            adc_res=options.get("adc_res", 16),
            plotd=False)
        XXX, _ = to1D(compared, length=lg)
    else:
        XXX, _ = to1D([nfiles], length=lg)
    # Prediction
    result = clf.predict(XXX)
    # Debug
    if debug:
        print(result)
    return result
def startcg():
    """Grab the start-CG screen region; click it when it matches the
    reference image, returning True on a click."""
    GI.getImg(1350, 710, 1490, 855)
    score = CP.compare('./temp.png', './cg.png')
    if score <= 0.9:
        # Not similar enough — do nothing (implicitly returns None).
        return
    print('cgicom match,start yuhun')
    MC.click(1350, 710, 1490, 855)
    return True
def outerInnerCompare(chain, oEntry, outer, inner, innerEvent, chainI, kargs):
    """Collect the outer payload (and the matched inner payload, when an
    inner mapping exists) into `kargs`, then run the comparison.

    Returns True to signal the caller to break when the inner entry
    cannot be read; returns None otherwise.

    FIX: removed the dead `oEntry += 1` before `return` — rebinding the
    local integer had no effect outside this function.
    """
    kargs["raw1"] = collectedRaw(tree=chain, specs=outer)
    if innerEvent:
        iEntry = innerEvent[oEntry]
        if iEntry is None:
            # no matching inner event: skip this outer event
            return
        if chainI.GetEntry(iEntry) <= 0:
            return True  # break!
        if inner:
            kargs["raw2"] = collectedRaw(tree=chainI, specs=inner)
    if outer["unpack"]:
        compare.compare(**kargs)
def test03(self):
    """US32: the multiple-births listing for gedfile 2 must match the
    expected result file exactly (an empty diff)."""
    indivs, families = main_parser.tester(self.gedfiles[2])[:2]
    expected_file = self.txtfiles[2]
    diffs = compare.compare(
        us32.list_multiple_births(indivs, families, print_table=False),
        expected_file, 'US32')
    self.results += diffs
    self.assertTrue(len(diffs) == 0)
def test_pathological_different_order(self):
    """Shuffling the RELS elementary predications must not change MRS
    equivalence — 100 random permutations all compare equal."""
    intro, eps, end = _parse(self.pathological1)
    count = len(eps)
    for _ in range(100):
        shuffled = " ".join(random.sample(eps, count))
        mrs = f"{intro}RELS: < {shuffled} > {end}"
        self.assertTrue(compare(mrs, self.pathological1))
def check_it():
    """Return whether the Debian mirror trace file is fresh (compared
    against a 1.5-day threshold by compare.compare)."""
    checkURL = "http://mirrors.zju.edu.cn/debian/project/trace/ftp-master.debian.org"
    with urllib.request.urlopen(checkURL) as response:
        body = response.read()
    # The first line of the trace file carries the sync date.
    first_line = body.decode().split("\n")[0]
    parsed = time.strptime(first_line, "%a %b %d %H:%M:%S %Z %Y")
    # Convert to an absolute timestamp using the timezone found in the text.
    stamp = tstt.convert(parsed, tstt.find_tmzone(first_line))
    return compare.compare(stamp, 3600 * 24 * 1.5)
def test01(self):
    """US33: the orphaned-children listing for gedfile 0 must match the
    expected result file exactly (an empty diff)."""
    indivs, fams = main_parser.tester(self.gedfiles[0])[:2]
    expected_file = self.txtfiles[0]
    diffs = compare.compare(
        us33.listorphaned(indivs, fams, print_table=False),
        expected_file, 'US33')
    self.results += diffs
    self.assertTrue(len(diffs) == 0)
def test02(self):
    """US35: the recently-born listing for gedfile 1 must NOT match the
    expected result file (a non-empty diff is expected)."""
    indivs = main_parser.tester(self.gedfiles[1])[0]
    expected_file = self.txtfiles[1]
    diffs = compare.compare(
        us35.list_recently_born(indivs, print_table=False),
        expected_file, 'US35')
    self.results += diffs
    self.assertFalse(len(diffs) == 0)
def test_inverse_better_wins_more(self):
    # Play 4 games between a strong agent (360) and a weak one (40);
    # player one (the stronger agent) is expected to win more often.
    # NOTE: Python 2 code (print statement).
    times_to_play = 4
    one_wins, two_wins, record = compare(agent(360), agent(40),
                                         times_to_play, game_creator,
                                         verbose=True)
    print one_wins, two_wins
    self.assertTrue(two_wins < one_wins)
def main(): projects = ["2-1"] #, "2-1", "2-2", "2-3"] for project in projects: masters = [ # "https://github.com/timostrating/parkingsimulator", # "https://github.com/VincentCremers/Parkeergarage", # "https://github.com/rotjeking7/Parkeergarage", # "https://github.com/DirkSuelmann2/Parkeergarage", # "https://github.com/RamonBonsema08/Parkeergarage", ] file = open(project + ".txt") for line in file: if len(line.strip()) < 10: continue try: dir_name = line.split(" ")[0].replace( "\n", "").split(".com/")[1].replace("/", "_") open(f"repo_sources/{project}/{dir_name}.txt", "r").read() masters.append(line.replace("\n", "")) except: pass for id, line in enumerate(masters): dir_name = line.split(" ")[0].replace( "\n", "").split(".com/")[1].replace("/", "_") print("\n", id, ":", dir_name) commits = exc_command( f"cd hanzerepos/{project}/{dir_name} && git rev-list --all --count" ) lines_of_code = exc_command( f"cd repo_sources/{project}/ && cat {dir_name}.txt | wc -l") update(id + 4, 1, line) update(1, id + 4, line) update(id + 4, 2, commits) update(2, id + 4, commits) update(id + 4, 3, lines_of_code) update(3, id + 4, lines_of_code) for id2, line2 in enumerate(masters): if id == id2: continue dir_name2 = line2.split(" ")[0].replace( "\n", "").split(".com/")[1].replace("/", "_") value = compare(\ open(f"repo_sources/{project}/{dir_name}.txt", "r").read(), open(f"repo_sources/{project}/{dir_name2}.txt", "r").read()) if lines_of_code == 0: update(id + 4, id2 + 4, "???") else: update(id + 4, id2 + 4, int(float(value) / lines_of_code * 100))
def run(self):
    """Run every available backend (ASAP, LAMMPS, potfit), compare their
    energies and forces, and clean up.

    Returns True when the comparison succeeds, False otherwise.

    FIX: the bare `except:` clauses also swallowed KeyboardInterrupt and
    SystemExit; they are narrowed to `except Exception` and the errors
    are now logged instead of silently discarded.
    """
    res = True
    energies = []
    forces = []
    if self.have_asap:
        try:
            logger.info('Running ASAP calculation ...')
            energy_a, forces_a = self.asap.run()
            energies.append(energy_a)
            forces.append(forces_a)
        except Exception as e:
            # best-effort: a failing backend is skipped, not fatal
            logger.error(e)
    try:
        logger.info('Running LAMMPS calculation ...')
        energy_l, forces_l = self.lammps.run()
        energies.append(energy_l)
        forces.append(forces_l)
    except Exception as e:
        logger.error(e)
    try:
        logger.info('Running potfit calculation ...')
        energy_p, forces_p = self.potfit.run()
        energies.append(energy_p)
        forces.append(forces_p)
    except Exception as e:
        logger.error(e)
    try:
        compare(energies, forces).run()
    except Exception as e:
        logger.error(e)
        res = False
    finally:
        # cleanup always runs, even when the comparison failed
        if self.have_asap:
            self.asap.cleanup()
        self.lammps.cleanup()
        self.potfit.cleanup()
    return res
def run_prog(name, nodes, overlap):
    """Build the selected variant, run it, compare its output pairs, and
    return a dict with totals, wall time, and the similarity score.

    FIX: the similarity value was parsed with `eval()` on log-file text —
    replaced with `ast.literal_eval`, which parses literals without
    executing arbitrary code. Also, `simi_correct` is now pre-initialized
    so an empty log no longer raises NameError.
    """
    import ast  # local import: only needed to parse the similarity number

    system("rm -rf build")
    if name == "base":
        system("(mkdir build; cd build; cmake .. -DBASELINE=ON; make)")
    elif name == "algo":
        system("(mkdir build; cd build; cmake .. -DALPHA_R=OFF; make)")
    elif name == "alpha-rolesim++":
        system("(mkdir build; cd build; cmake ..; make)")

    print ("\n[Running] `" + name + "` ...")
    start_time = time()
    system("time ./main > result/" + name + "_pair.log")
    end_time = time()

    compare("result/" + name + "_pair.log", "data/pair_a_c.txt",
            (nodes - overlap) / 2 + overlap, overlap,
            "result/" + name + "_onetime.log", flag=1)
    system("mv most_simi.log result/" + name + "_mostsimi.log")
    compare("result/" + name + "_mostsimi.log", "data/pair_a_c.txt",
            (nodes - overlap) / 2 + overlap, overlap,
            "result/" + name + "_simires.log", flag=1)

    simi_correct = None
    with open("result/" + name + "_simires.log", "r") as f:
        for raw_line in f.readlines():
            line = raw_line.rstrip()
            if line != "":
                # second whitespace-separated field is the numeric score
                simi_correct = ast.literal_eval(line.split(" ")[1])

    ret = {
        "tot": extract_res(
            "result/" + name + "_onetime.log",
            (nodes - overlap) / 2 + overlap
        ),
        "time": end_time - start_time,
        "simi": simi_correct,
    }
    return ret
def check_it():
    """Return whether the openSUSE mirror listing is fresh (compared
    against a 3-day threshold by compare.compare).

    FIX: the regex patterns are now raw strings — '\\w' and '\\-' in a
    normal string are invalid escape sequences.
    """
    checkURL = "http://mirrors.zju.edu.cn/opensuse/update/"
    line_regex = re.compile(r'<a href="openSUSE-current/">openSUSE-current/</a>.*')
    date_regex = re.compile(r"\w*\-\w*-\w* \w*:\w*")
    with urllib.request.urlopen(checkURL) as response:
        plain_string = response.read().decode()
    # Find the openSUSE-current row, then pull its date field out of it.
    time_line = date_regex.findall(line_regex.findall(plain_string)[0])[0]
    timestruct = time.strptime(time_line, "%d-%b-%Y %H:%M")
    timestamp = time.mktime(timestruct)  # In local time, NO TSTT!
    return compare.compare(timestamp, 3600 * 24 * 3)
def update():
    # Refresh the ProgrammableWeb snapshot, dump it with mongodump, diff it
    # against the previous snapshot, and rotate the "previous" pointer.
    # NOTE: Python 2 code (print statements).
    getEntries("apis")
    print "fetch apis done!"
    getEntries("mashups")
    print "fetch mashups done!"
    delete_duplicates()
    print "delete done!"
    api_notin_db()
    print "api not in db"
    insert_Pair()
    print "join table done"
    # Dump the freshly built "PW_<timestamp>" database to disk.
    call([setting.db_path + "/bin/mongodump", "--db", "PW_" + timestamp,
          "-o", setting.working_path + "/dump/"])
    print "drop done"
    db = setting.db_connection["utilities"]
    # Record the dump so it can be located later.
    db.dump.insert({"filename": "PW_" + timestamp})
    print "update all done!"
    # Diff the new snapshot against the previously recorded one.
    previous = db.previous.find()[0]["filename"]
    print previous
    compare("PW_" + timestamp, previous)
    # compare("PW_2012_09_20_17_41_01", previous)
    # Rotate the "previous" pointer to the new snapshot.
    db.previous.remove({"filename": previous})
    db.previous.insert({"filename": "PW_" + timestamp})
    # db.previous.insert({"filename": "PW_2012_09_20_17_41_01"})
    print "compare done"
def run_topk(dirname):
    # Stage the dataset, run ./main, and compare both the raw pair log and
    # the most-similar log against ground truth. NOTE: Python 2 code.
    system("cp data/" + dirname + "/anonymized.txt data/")
    system("cp data/" + dirname + "/crawled.txt data/")
    system("cp data/" + dirname + "/pair.txt data/")
    system("./main > result/" + dirname + "_pair.log")
    sleep(1)
    # Score the pairing output against the ground-truth pairs.
    compare("result/" + dirname + "_pair.log",
            "data/pair.txt",
            7500,
            5000,
            "result/" + dirname + ".log",
            flag=1)
    system("mv most_simi.log result/" + dirname + "_mostsimi.log")
    # Score the most-similar output the same way.
    compare("result/" + dirname + "_mostsimi.log",
            "data/pair.txt",
            7500,
            5000,
            "result/" + dirname + "_simres.log",
            flag=1)
    sleep(1)
    # Show the summary line (last line) of each result file.
    print "%s result: " % (dirname)
    system("tail -1 result/" + dirname + ".log")
    print "%s simi: " % (dirname)
    system("tail -1 result/" + dirname + "_simres.log")
def render(filename, width, height, bbox):
    """Render a Mapnik style to PNG next to its reference image and diff
    the two. NOTE: Python 2 code (print statements). Returns the Map."""
    print "-"*80
    print "Rendering style \"%s\" with size %dx%d ... " % (filename, width, height)
    print "-"*80
    m = mapnik.Map(width, height)
    mapnik.load_map(m, os.path.join(dirname, "styles", "%s.xml" % filename), False)
    if bbox is not None:
        m.zoom_to_box(bbox)
    else:
        m.zoom_all()
    # Output and reference share a common base filename in images/.
    basefn = os.path.join(dirname, "images", '%s-%d' % (filename, width))
    mapnik.render_to_file(m, basefn+'-agg.png')
    # Pixel-level diff; 0 means the render matches the reference exactly.
    diff = compare(basefn + '-agg.png', basefn + '-reference.png')
    if diff > 0:
        print "-"*80
        print 'Error: %u different pixels' % diff
        print "-"*80
    return m
def test_deletions_before(self):
    # Two revisions of the same function: t1 is t2 with the first two
    # prints deleted. Diffing t2 -> t1 (context 4) must report a single
    # hunk whose bounds land on the surviving comment markers.
    # NOTE(review): in-string indentation reconstructed as 4 spaces to be
    # consistent with the asserted column bounds — confirm.
    t1 = ('def foo():\n'
          '    print \'baz\' # (1)\n'
          '    ')
    t2 = ('def foo():\n'
          '    print \'foo\'\n'
          '    print \'bar\'\n'
          '    print \'baz\' # (1)\n'
          '    ')
    result = compare.compare(t2, t1, 4)
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].left_bounds(), (2, 16))
    self.assertEqual(result[0].right_bounds(), (2, 21))
    # Same idea here: both deletions on the same line.
    self.assertEqual(result[0].score(), 92)
def compare_to_benchmark(self, rtol):
    """Compare the current simulation data to the stored benchmark output
    within the given relative tolerance; return the comparison code, or
    an error string if the benchmark file cannot be opened."""
    basename = self.rp.get_param("io.basename")
    compare_file = "{}/tests/{}{:04d}".format(
        self.solver_name, basename, self.sim.n)
    msg.warning("comparing to: {} ".format(compare_file))
    try:
        sim_bench = io.read(compare_file)
    except IOError:
        msg.warning("ERROR opening compare file")
        return "ERROR opening compare file"
    result = compare.compare(self.sim.cc_data, sim_bench.cc_data, rtol)
    if result != 0:
        msg.warning("ERROR: " + compare.errors[result] + "\n")
    else:
        msg.success(
            "results match benchmark to within relative tolerance of {}\n".format(rtol))
    return result
def test_additions(self):
    # t2 is t1 with two lines added; diffing t1 -> t2 must report two
    # hunks, each bounded by the inline comment markers.
    # NOTE(review): in-string indentation reconstructed as 4 spaces to be
    # consistent with the asserted column bounds — confirm.
    t1 = ('def foo():\n'
          '    print \'foo\' # (1)\n'
          '    ')
    t2 = ('def foo():\n'
          '    print \'foo\' # (1)\n'
          '    print \'bar\'\n'
          '    print \'baz\' # (2)\n'
          '    ')
    result = compare.compare(t1, t2)
    self.assertEqual(len(result), 2)
    self.assertEqual(result[0].left_bounds(), (2, 16))
    self.assertEqual(result[0].right_bounds(), (2, 21))
    self.assertEqual(result[0].score(), 98)
    self.assertEqual(result[1].left_bounds(), (4, 16))
    self.assertEqual(result[1].right_bounds(), (4, 21))
    self.assertEqual(result[1].score(), 100)
def render(filename, width, height, bbox, quiet=False):
    """Render a Mapnik style to PNG and diff it against the reference image.
    NOTE: Python 2 code (print statements). Returns the Map object."""
    if not quiet:
        print "Rendering style \"%s\" with size %dx%d ... \x1b[1;32m✓ \x1b[0m" % (filename, width, height)
        print "-"*80
    m = mapnik.Map(width, height)
    mapnik.load_map(m, os.path.join(dirname, "styles", "%s.xml" % filename), False)
    if bbox is not None:
        m.zoom_to_box(bbox)
    else:
        m.zoom_all()
    expected = os.path.join(dirname, "images", '%s-%d-reference.png' % (filename, width))
    # Render into a scratch directory so references are never overwritten.
    if not os.path.exists('/tmp/mapnik-visual-images'):
        os.makedirs('/tmp/mapnik-visual-images')
    actual = os.path.join("/tmp/mapnik-visual-images", '%s-%d-agg.png' % (filename, width))
    mapnik.render_to_file(m, actual)
    # Pixel-level diff; 0 means the render matches the reference exactly.
    diff = compare(actual, expected)
    if diff > 0:
        print "-"*80
        print '\x1b[33mError:\x1b[0m %u different pixels' % diff
        print "-"*80
    return m
def test(cmd, smpl_paths, tmpl_paths, patterns, out_dir, demo):
    """End-to-end test: synthesize a model, simulate the demo, then diff
    the simulated log against every recorded sample log.

    Returns 0 on success, otherwise the first non-zero error code.
    """
    # step 1: run the main process to obtain a synthesized model
    err = main(cmd, smpl_paths, tmpl_paths, patterns, out_dir)
    if err:
        return err

    # step 2: simulate, capturing the log to a file
    log_fname = os.path.join(out_dir, "simulated.txt")
    err = simulate.run(cmd, demo, patterns, out_dir, log_fname)
    if err:
        return err

    # step 3: the simulated log must agree with every sample log
    smpl_path = os.path.join(smpl_dir, cmd, demo)
    for smpl in util.get_files_from_path(smpl_path, "txt"):
        err = compare.compare(smpl, log_fname)
        if err:
            logging.error("conflict with " + os.path.normpath(smpl))
            return err
        logging.info("compared with " + os.path.normpath(smpl))

    logging.info("test done")
    return 0
def test_modification(self):
    """Inserted/modified lines around annotations: the surviving (2)
    annotation moves down with a reduced score; the new (1) line is a
    fresh, perfect-score result."""
    before = ('def foo():\n'
              ' print \'foo\' # (2)\n'
              ' print \'bar\'\n'
              ' print \'baz\'\n'
              ' ')
    after = ('def foo():\n'
             ' print \'chaos\' # (1)\n'   # -8
             ' print \'foo\' # (2)\n'
             ' print \'monkey\'\n'        # -16
             ' print \'baz\'\n'
             ' ')
    diffs = compare.compare(before, after, 8)
    self.assertEqual(len(diffs), 2)
    first, second = diffs
    self.assertEqual(first.left_bounds(), (3, 16))
    self.assertEqual(first.right_bounds(), (3, 21))
    self.assertEqual(first.score(), 76)
    self.assertEqual(second.left_bounds(), (2, 18))
    self.assertEqual(second.right_bounds(), (2, 23))
    self.assertEqual(second.score(), 100)
def test_general_poisson_inhomogeneous(N, store_bench=False, comp_bench=False, make_plot=False, verbose=1):
    """
    Test the general MG solver on an N x N grid with inhomogeneous
    Dirichlet BCs.  The return value here is the error compared to the
    exact solution, UNLESS comp_bench=True, in which case the return
    value is the error code compared to the stored benchmark.
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable -- alpha, beta, gamma_x, gamma_y
    # live on a CellCenterData2d patch with Neumann BCs
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = patch.BCObject(xlb="neumann", xrb="neumann",
                          ylb="neumann", yrb="neumann")
    d.register_var("alpha", bc_c)
    d.register_var("beta", bc_c)
    d.register_var("gamma_x", bc_c)
    d.register_var("gamma_y", bc_c)
    d.create()

    a = d.get_var("alpha")
    a[:,:] = alpha(g.x2d, g.y2d)

    b = d.get_var("beta")
    b[:,:] = beta(g.x2d, g.y2d)

    gx = d.get_var("gamma_x")
    gx[:,:] = gamma_x(g.x2d, g.y2d)

    gy = d.get_var("gamma_y")
    gy[:,:] = gamma_y(g.x2d, g.y2d)

    # create the multigrid object
    # NOTE(review): `a` is rebound here from the alpha array to the MG
    # solver object (the array is still held by `d`); likewise `b` is
    # reused later for the analytic solution
    a = MG.GeneralMG2d(nx, ny,
                       xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                       xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                       xl_BC=xl_func, yl_BC=yl_func,
                       coeffs=d,
                       verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    print( np.min(rhs), np.max(rhs))

    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-10
    a.solve(rtol=1.e-10)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d,a.y2d)
    e = v - b

    enorm = a.soln_grid.norm(e)
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution and the error side by side
    if make_plot:
        plt.clf()
        plt.figure(figsize=(10.0,4.0), dpi=100, facecolor='w')

        plt.subplot(121)
        plt.imshow(np.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))
        plt.colorbar()

        plt.subplot(122)
        plt.imshow(np.transpose(e[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")
        plt.colorbar()

        plt.tight_layout()
        plt.savefig("mg_general_inhomogeneous_test.png")

    # store the output for later comparison
    bench = "mg_general_poisson_inhomogeneous"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file) )
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    # normal return -- error wrt true solution
    return enorm
def render_cairo(m, output, scale_factor): mapnik.render_to_file(m, output, 'ARGB32', scale_factor) # open and re-save as png8 to save space new_im = mapnik.Image.open(output) new_im.save(output, 'png8:m=h') def render_grid(m, output, scale_factor): grid = mapnik.Grid(m.width, m.height) mapnik.render_layer(m, grid, layer=0) utf1 = grid.encode('utf', resolution=4) open(output,'wb').write(json.dumps(utf1, indent=1)) renderers = [ { 'name': 'agg', 'render': lambda m, output, scale_factor: mapnik.render_to_file(m, output, 'png8:m=h', scale_factor), 'compare': lambda actual, reference: compare(actual, reference, alpha=True), 'threshold': 0, 'filetype': 'png', 'dir': 'images' }, { 'name': 'cairo', 'render': render_cairo, 'compare': lambda actual, reference: compare(actual, reference, alpha=False), 'threshold': cairo_threshold, 'filetype': 'png', 'dir': 'images' }, { 'name': 'grid', 'render': render_grid, 'compare': lambda actual, reference: compare_grids(actual, reference, alpha=False), 'threshold': 0,
A program to synchronise the files between two directories given as arguments

ASSUMPTION: If a file has been deleted in one directory but exists in the other,
and the modified times are the same then the file is deleted in both directories

Author: Hayden Knowles
"""

dir1 = sys.argv[1]
dir2 = sys.argv[2]

# normalise both arguments to end with a trailing slash
if dir1[-1] != '/':
    dir1 += '/'
if dir2[-1] != '/':
    dir2 += '/'

if not os.path.exists(dir1) and not os.path.exists(dir2):
    # neither path exists -- nothing to synchronise
    print("Error: arguments are not directories")
elif os.path.exists(dir1) or os.path.exists(dir2):
    # make directory and update sync files
    # NOTE(review): the elif below creates at most one missing directory;
    # this is safe only because the both-missing case was rejected above --
    # confirm neither path can exist as a non-directory file.
    if not os.path.isdir(dir1):
        os.makedirs(dir1)
    elif not os.path.isdir(dir2):
        os.makedirs(dir2)

    # update directory sync files
    update_sync(dir1)
    update_sync(dir2)

    # compare and update directories and sync files
    compare(dir1, dir2)
__author__ = 'rylan'

from scrape import scrape
from compare import compare
from printToHTML import printToHTML

# Scrape article titles from two news sources, pair related titles,
# and emit the pairings as HTML.
# sourceOne = 'https://news.google.com/'
# sourceTwo = 'https://news.yahoo.com/'
sourceOne = raw_input('Please enter first newsource: ')
sourceTwo = raw_input('Please enter second newsource: ')

# scrape() returns an indexable result: [0] and [1] hold the titles
# for each source respectively (presumably lists -- confirm in scrape.py)
articleTitles = scrape(sourceOne, sourceTwo)
pairings = compare(articleTitles[0], articleTitles[1])
printToHTML(pairings)
bbox = mapnik.Box2d(-0.05, -0.01, 0.95, 0.01)
m.zoom_to_box(bbox)

# a Format node wrapping a plain [name] text node, rendered in green
formatnode = mapnik.FormattingFormat()
formatnode.child = mapnik.FormattingText("[name]")
formatnode.fill = mapnik.Color("green")

# (label, formatting tree) pairs; each tree is rendered once and the
# output diffed against its stored reference image
format_trees = [
    ('TextNode', mapnik.FormattingText("[name]")),
    ('MyText', MyText()),
    ('IfElse', IfElse("[nr] != '5'",
                      mapnik.FormattingText("[name]"),
                      mapnik.FormattingText("'SPECIAL!'"))),
    ('Format', formatnode),
    ('List', mapnik.FormattingList([
        mapnik.FormattingText("[name]+'\n'"),
        MyText()
    ]))
]

for format_tree in format_trees:
    # install this tree as the default placement format and render it
    text.placements.defaults.format_tree = format_tree[1]
    mapnik.render_to_file(m, os.path.join(dirname,"images", 'python-%s.png' % format_tree[0]), 'png')
    compare(os.path.join(dirname,"images", 'python-%s.png' % format_tree[0]),
            os.path.join(dirname,"images", 'python-%s-reference.png' % format_tree[0]))

summary()
count = {} # Create an array of note sequences for each song while i < 60: if (i == 33) or (i == 34): i += 1 else: notearray.array(i, notes) i += 1 # Map note transitions for all songs while j < 60: if (j == 32) or (j == 33) or (j == 34): j += 1 else: compare.compare(j, count, notes) j += 1 # Generate probability matrix while co < 12: compare.probability(co,count) co += 1 # Write 60 measures of song while k < 60: write.write(p, count, song) k+=1 # Display song as musicXML song.show()
def worker(input, output):
    """Drain argument tuples from the input queue, putting the result of
    compare(*args) on the output queue, until the 'STOP' sentinel arrives."""
    while True:
        task = input.get()
        if task == 'STOP':
            break
        output.put(compare(*task))