class FinderTests(TestCase):
    """Timing-oriented tests for Finder lookups on random string data.

    Each test's wall-clock duration is logged in tearDown; the sleeps in
    the tests exist to make the per-test timing output visibly different.
    """

    def setUp(self):
        # Build a list of 10000 random 10-character lowercase strings,
        # remember the first element for the success-case comparison,
        # and record the start time for the tearDown timing log.
        per_str_chars = 10
        array_size = 10000
        string_list = [
            ''.join(
                random.choice(string.ascii_lowercase)
                for _ in range(per_str_chars))
            for _ in range(array_size)
        ]
        self.first_array_str = string_list[0]
        self.finder = Finder(string_list)
        self.start_time = time.time()

    def test_success(self):
        # Searching for the reverse of a stored string should find the
        # original string (Finder presumably matches reversed strings —
        # verify against Finder.find).
        time.sleep(1)
        first_array_reverse_str = self.first_array_str[::-1]
        self.assertEqual([self.first_array_str],
                         self.finder.find(first_array_reverse_str))

    def test_failure(self):
        # A string that cannot match anything must yield None.
        time.sleep(2)
        self.assertEqual(None, self.finder.find('Invalid'))

    def tearDown(self):
        # Log the wall-clock time taken by each test.
        t = time.time() - self.start_time
        print("%s: %.3f" % (self.id(), t))
def test_buildMapEmpty(self):
    """An empty line list must produce an empty anagram map."""
    finder = Finder("./test.txt")
    finder.lines = []
    finder.buildMap()
    self.assertEqual({}, finder.map)
def __init__(self, ensembleType, finder, nClassifiers, modelPath, percentOfTraining=0,\
             duplicatesAllowed=False, randomSeed=42, rerankType='vote', countOther=True):
    """ Create an ensemble classifier.
        ensembleType = 'feature', 'featureType', 'abstract'
        finder = classifier to create copies of
        nClassifiers = number of classifiers in the ensemble
        modelPath = directory where per-classifier model files are written
        percentOfTraining = is percentage of the training set used for each
          classifier's training set
          (if 0, then the training sets are disjoint and there is no overlap)
        duplicatesAllowed = are duplicate training examples allowed
        randomSeed = seed used when sampling training sets
        rerankType = strategy for combining classifier outputs (default 'vote')
        countOther = whether the 'other' label is counted
    """
    # Base-class init takes the entity types from the wrapped finder.
    Finder.__init__(self, finder.entityTypes)
    self.finderType = 'ensemble'
    self.type = ensembleType
    self.finder = finder
    self.nClassifiers = nClassifiers
    self.duplicatesAllowed = duplicatesAllowed
    self.percentOfTraining = percentOfTraining
    self.randomSeed = randomSeed
    self.baggedFeatures = []
    self.modelPath = modelPath
    # Normalize to a trailing slash so filenames below concatenate cleanly.
    if self.modelPath[-1] != '/':
        self.modelPath = self.modelPath + '/'
    self.countOther = countOther
    self.rerankType = rerankType
    # One model file per ensemble member.
    self.modelFilenames = []
    for i in range(self.nClassifiers):
        self.modelFilenames.append('%s%s.%d.train.model' %(self.modelPath,self.entityTypesString,i))
    # Cross product of entity types and ensemble member indices.
    self.ensembleTypes = set([])
    for i in range(self.nClassifiers):
        for eType in self.entityTypes:
            self.ensembleTypes.add(self.toEnsembleLabel(eType, i))
def createTeam(firstIndex, secondIndex, isRed, first='OffensiveAgent', second='DefensiveDummyAgent'):
    """
    This function should return a list of two agents that will form the
    team, initialized using firstIndex and secondIndex as their agent
    index numbers. isRed is True if the red team is being created, and
    will be False if the blue team is being created.

    "first" and "second" are string-valued keyword arguments naming the
    agent classes to instantiate; they come from the --redOpts and
    --blueOpts command-line arguments to capture.py. For the nightly
    contest the team is created without extra arguments, so the defaults
    must be the desired behavior.
    """
    # Finder() already runs __init__ — the original called
    # locationFinder.__init__() a second time, redundantly re-initializing
    # the shared location tracker.
    locationFinder = Finder()
    # NOTE(review): eval() on externally supplied names executes arbitrary
    # expressions; this is tolerable only because capture.py's harness
    # controls these option strings.
    return [
        eval(first)(firstIndex, locationFinder),
        eval(second)(secondIndex, locationFinder)
    ]
class FinderTests(unittest.TestCase):
    """Path-enumeration tests for Finder over small directed graphs."""

    def setUp(self):
        # Fresh graph and Finder for each test.
        self.g = nx.DiGraph()
        self.f = Finder(self.g)

    def testFindsAPath(self):
        self.g.add_nodes_from([0, 1, 2, 3, 4])
        self.g.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4)])
        # Can't go from 1 to 3, or 2 to 1
        test = sorted(self.f.find_paths_from(0))
        self.assertEqual(test, [[0, 1, 3], [0, 1, 4], [0, 2]])

    def testHandlesCyclicGraphs(self):
        self.g.add_nodes_from([0, 1, 2])
        self.g.add_edges_from([(0, 1), (1, 2), (2, 0)])
        # Fixed: assert on the result already computed above; the original
        # left `test` unused and redundantly re-ran find_paths_from.
        test = sorted(self.f.find_paths_from(0))
        self.assertEqual(test, [[0, 1, 2]])

    def testFindsAllPaths(self):
        self.g.add_nodes_from([0, 1, 2, 3])
        self.g.add_edges_from([(0, 1), (1, 0), (0, 2), (2, 3), (3, 1), (1, 2)])
        self.assertEqual(sorted(self.f.find_all_paths()), sorted([
            [0, 1, 2, 3], [0, 2, 3, 1], [1, 0, 2, 3], [1, 2, 3],
            [2, 3, 1, 0], [3, 1, 2], [3, 1, 0, 2]]))
def __init__(self, query):
    """Tokenize *query* using the stop-word list from stop_words.txt.

    (Python 2 code: `unicode` and list-returning `map`.)
    """
    # `with` closes the stop-word file deterministically; the original
    # leaked the codecs.open handle.
    with codecs.open('stop_words.txt', 'r', 'utf-8') as stop_file:
        self.stop_words = map(lambda x: unicode(x), stop_file.read().split())
    self.finder = Finder()
    self.query = query
    self.tokens = self.parse()
class FinderTests(unittest.TestCase):
    """Tests for Finder's path enumeration over directed graphs."""

    def setUp(self):
        # Each test starts from an empty graph.
        self.g = nx.DiGraph()
        self.f = Finder(self.g)

    def testFindsAPath(self):
        self.g.add_nodes_from([0, 1, 2, 3, 4])
        self.g.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4)])
        # Can't go from 1 to 3, or 2 to 1
        test = sorted(self.f.find_paths_from(0))
        self.assertEqual(test, [[0, 1, 3], [0, 1, 4], [0, 2]])

    def testHandlesCyclicGraphs(self):
        self.g.add_nodes_from([0, 1, 2])
        self.g.add_edges_from([(0, 1), (1, 2), (2, 0)])
        # Fixed: use the `test` result computed above instead of calling
        # find_paths_from a second time (the original left `test` unused).
        test = sorted(self.f.find_paths_from(0))
        self.assertEqual(test, [[0, 1, 2]])

    def testFindsAllPaths(self):
        self.g.add_nodes_from([0, 1, 2, 3])
        self.g.add_edges_from([(0, 1), (1, 0), (0, 2), (2, 3), (3, 1), (1, 2)])
        self.assertEqual(
            sorted(self.f.find_all_paths()),
            sorted([[0, 1, 2, 3], [0, 2, 3, 1], [1, 0, 2, 3], [1, 2, 3],
                    [2, 3, 1, 0], [3, 1, 2], [3, 1, 0, 2]]))
def test_buildMap(self):
    """Words that are anagrams of each other share a sorted-letter key."""
    finder = Finder("./test.txt")
    finder.lines = ["dog", "cat", "god"]
    finder.buildMap()
    self.assertEqual({"dgo": ["dog", "god"], "act": ["cat"]}, finder.map)
def on_post(self, req, resp):
    """Handle POST: look up each device named in the JSON body's 'Clients'."""
    payload = json.loads(req.stream.read())
    device_finder = Finder()
    resp.status = falcon.HTTP_200
    resp.set_header('Access-Control-Allow-Origin', '*')
    resp.body = device_finder.findMultipleDevices(payload['Clients'])
def scores_for(puzzle):
    """Return (score, cell_values, path) triples for every path, sorted ascending.

    Each path's cell values are computed once and reused for both the
    Score calculation and the reported values — the original evaluated
    F.graph.path_values twice per path (safe to merge: the original
    already relied on both evaluations agreeing).
    """
    puzzle = normalize_nums(puzzle)
    F = Finder(Grid(puzzle))
    scored = []
    for path in F.find_all_paths():
        values = F.graph.path_values(path)
        scored.append((Score.calculate_from(values), values, path))
    return sorted(scored)
def _populate_core_data(self):
    """Run every analysis finder over the original text and store each result."""
    finder = Finder(self.original_text)
    for attr in Attributes.ANALYSIS_ATTRIBUTES:
        # define_finders maps the attribute name to the finder method name.
        self.core_data[attr] = getattr(finder, finder.define_finders(attr))()
    debug.show_full_data(self.core_data)
def _run_astar(data):
    """Run A* on the current map and return the map with the path drawn in."""
    steps = Finder.find_path_with_astar(data)
    return MapManager.draw_path_with_steps(data, steps)


def main():
    """Interactive A* demo: edit blocks with the mouse, load maps 0-9 with
    the keypad, and press F to (re)run pathfinding."""
    WIDTH_SCREEN, HEIGHT_SCREEN = 800, 800
    pygame.init()
    screen = pygame.display.set_mode((WIDTH_SCREEN, HEIGHT_SCREEN))
    pygame.display.set_caption("A* Pathfinding")
    pygame.font.init()
    screen.fill((150, 150, 150))
    renderer = Renderer(WIDTH_SCREEN, HEIGHT_SCREEN)
    # Keypad key -> map file number; replaces the original ten-way elif chain.
    keypad_to_map = {getattr(pygame, 'K_KP%d' % n): n for n in range(10)}
    # Initial call
    data = MapManager.load_map_from_file(1)
    # Main Loop
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.MOUSEBUTTONUP:
                pos = pygame.mouse.get_pos()
                if event.button == 1:  # left click places a block
                    data = MapManager.write_map_with_mouse_click(
                        data, pos, constants.STATUS_BLOCK)
                elif event.button == 3:  # right click clears a cell
                    data = MapManager.write_map_with_mouse_click(
                        data, pos, constants.STATUS_DEFAULT)
                # As in the original, pathfinding reruns on every button-up.
                data = _run_astar(data)
            elif event.type == pygame.KEYDOWN:
                if event.key in keypad_to_map:
                    data = MapManager.load_map_from_file(keypad_to_map[event.key])
                elif event.key == pygame.K_f:
                    data = _run_astar(data)
        renderer.render(screen, data)
        pygame.display.flip()
def __init__(self, entityTypes, tokenClassifier):
    """ Create a new mention finder to find a given list of mention types.
        entityTypes = list of mention types (e.g. group, outcome) to find
        tokenClassifier = token-level classifier used by this finder, or
          None for the plain 'mention' finder type
    """
    Finder.__init__(self, entityTypes)
    self.tokenClassifier = tokenClassifier
    # `is not None` is the idiomatic None test; `!=` can be hijacked by
    # operator overloading.
    if self.tokenClassifier is not None:
        self.finderType = 'mention.'+self.tokenClassifier.classifierType
    else:
        self.finderType = 'mention'
def __init__(self, entityType, sentenceFilter, useDetected=True):
    """Build a component that clusters similar mentions of one entity type.

    useDetected selects whether detected (True) or annotated (False)
    mentions are the ones clustered.
    """
    Finder.__init__(self, [entityType])
    self.finderType = 'clusterer'
    self.useDetected = useDetected
    self.sentenceFilter = sentenceFilter
def setUp(self):
    """Build a Finder over 10000 random 10-character lowercase strings
    and record the start time for timing."""
    chars_per_string = 10
    total_strings = 10000
    strings = []
    for _ in range(total_strings):
        strings.append(''.join(random.choice(string.ascii_lowercase)
                               for _ in range(chars_per_string)))
    self.first_array_str = strings[0]
    self.finder = Finder(strings)
    self.start_time = time.time()
def main(): word1 = sys.argv[1] word2 = sys.argv[2] size = len(word1)+1 # include the new line finder = Finder(size, sys.argv[3]) # flip it since we backtrack if finder.find(word2, word1): print word2 else: print "no match"
def find_words_with(characters):
    """Return a JSON response listing dictionary words containing any of
    *characters*, plus total and matching counts."""
    # `with` guarantees the handle is closed even if read() raises;
    # the original's manual close() leaked the handle on error.
    with open(word_file) as f:
        lines = f.read().splitlines()
    finder = Finder()
    matching_words = finder.words_that_contain_any(lines, characters)
    return jsonify(total_words=len(lines),
                   matching_words=len(matching_words),
                   words=matching_words)
def gatherLinks(pageURL):
    """Fetch pageURL and return the set of links found in its HTML.

    Returns an empty set when the page is not HTML or the fetch fails.
    (The original raised NameError on non-HTML responses because
    finderObject was never bound before the final return.)
    """
    try:
        response = urlopen(pageURL)
        if 'text/html' not in response.getheader('Content-Type'):
            return set()
        html_string = response.read().decode("utf-8")
        finderObject = Finder(Spider.baseURL, pageURL)
        finderObject.feed(html_string)
    except Exception as e:
        print(str(e))
        return set()
    return finderObject.pageLink()
def gather_links(page_url):
    """Fetch page_url and return the set of links in its HTML, or an
    empty set on any failure.

    Fixed: when the Content-Type was not HTML the original fell through
    to `return finder.page_links()` with `finder` unbound (NameError).
    """
    try:
        response = urlopen(page_url)
        if 'text/html' not in response.getheader('Content-Type'):
            return set()
        html_string = response.read().decode('utf-8')
        finder = Finder(Spider.base_url, page_url)
        finder.feed(html_string)
    except Exception as e:
        print(str(e))
        return set()
    return finder.page_links()
class HTTPHandler(BaseHTTPRequestHandler):
    """Serve GET /?sku=...&power=... lookups against the recommendation table."""

    # Shared across requests: the CSV is loaded once at class-creation time.
    finder = Finder('recommends.csv')

    @staticmethod
    def parse_query_string_(qs):
        """Parse sku and power out of the request path; power falls back to
        0 when missing or non-numeric."""
        qs_dict = parse_qs(qs[2:])  # qs[2:] strips the leading '/?'
        # parse_qs yields lists; keep only the first value of each key.
        for q in qs_dict:
            qs_dict[q] = qs_dict[q][0]
        sku = qs_dict.get('sku', '')
        try:
            power = float(qs_dict.get('power', 0))
        except ValueError:
            power = 0
        return sku, power

    def do_GET(self):
        """Answer a recommendation lookup: 400 without a sku, 404 for an
        unknown product, otherwise a JSON body {'rec': ...}."""
        sku, power = self.parse_query_string_(self.path)
        if not sku:
            # Fixed: a missing sku is a client error. The original sent
            # status 403 (Forbidden) paired with the message 'Bad Request';
            # 400 is the status that matches the message.
            self.send_response(400, message='Bad Request')
            self.end_headers()
        else:
            rec = self.finder.find_rec(sku, power)
            if rec == -1:
                self.send_response(404, message='Product not found')
                self.end_headers()
            else:
                resp_dict = {'rec': rec}
                self.send_response(200, message='OK')
                self.end_headers()
                self.wfile.write(json.dumps(resp_dict).encode())
def test_finder_invalid(self):
    """Finder must reject a dict argument with ValueError."""
    try:
        Finder({})
    except ValueError:
        pass  # expected
    except Exception:
        assert False  # wrong exception type
    else:
        # Fixed: the original silently passed when no exception was
        # raised at all, so the test could never catch a regression
        # where Finder accepts invalid input.
        assert False  # expected ValueError, got none
def __init__(self, profiler=False):
    # NOTE(review): `profiler` is accepted but never read — a
    # cProfile.Profile is created unconditionally. Confirm whether it
    # should gate profiling.
    self.profiler = cProfile.Profile()
    # Shared mutable list: the Finder fills it and the UI/events read it.
    skeletons = []
    # `recorder` and `outputs` are presumably module-level objects — verify
    # against the surrounding module.
    self.jester_ui = JesterUI({"skeletons": skeletons, "recorder": recorder, "outputs": outputs})
    self.skeleton_finder = Finder(skeletons=skeletons, ui=self.jester_ui)
    self.jester_events = JesterEvents(skeletons=skeletons, recorder=recorder, outputs=outputs, ui=self.jester_ui, p=self.profiler)
def gather_link(page_url):
    """Fetch page_url (TLS verification disabled) and return its links.

    Returns an empty set on any failure or when the response is not HTML.
    Fixes in this revision: the fetch and the Content-Type inspection now
    happen inside the try (the original's urlopen and its
    split('charset=')[1] — whose result was never used — ran unprotected
    and could crash the crawler), and a non-HTML response now returns
    set() instead of hitting `finder` unbound (NameError).
    """
    try:
        req = Request(page_url, headers={'User-Agent': 'Mozilla/5.0'})
        # NOTE(review): certificate verification is deliberately disabled
        # here — confirm this is acceptable for the target sites.
        context = ssl._create_unverified_context()
        response = urlopen(req, context=context)
        if 'text/html;' not in str(response.getheader('Content-Type')):
            return set()
        html_string = response.read().decode('utf-8')
        parser = TheHTMLParse(html_string, Spider.project_name, page_url)
        finder = Finder(Spider.base_url, page_url)
        finder.feed(str(html_string))
    except Exception:
        logthis("Spider. Sorry sir i can't crawl this page ...", Spider.project_name)
        return set()
    return finder.page_links()
def findRoute(self):
    # Redraw the base map, prompt for source/destination, then draw the
    # route if one is found. (Python 2 / Tkinter.)
    self.renderMap()
    # NOTE(review): assumes RouteDialog blocks until submitted and then
    # exposes .source / .dest — confirm in RouteDialog.
    d = RouteDialog(self)
    r = Finder.findRoute(d.source,d.dest)
    if len(r) > 0:
        print "GOT ROUTE"
        print r
        self.renderRoute(r)
    else :
        tkMessageBox.showinfo("No results","No destinations match your query")
def findRoute(self): self.renderMap() d = RouteDialog(self) r = Finder.findRoute(d.source, d.dest) if len(r) > 0: print "GOT ROUTE" print r self.renderRoute(r) else: tkMessageBox.showinfo("No results", "No destinations match your query")
def __init__(self, bits):
    """Build the shuffled universe of every color representable with
    *bits* bits per channel. (Python 2: xrange, list-returning map.)"""
    self.bits = bits
    channel_limit = int(math.pow(2, bits))
    # Every (r, g, b) combination becomes one Color object.
    self.colors = []
    for red in xrange(0, channel_limit):
        for green in xrange(0, channel_limit):
            for blue in xrange(0, channel_limit):
                self.colors.append(Color(red, green, blue, bits))
    random.shuffle(self.colors)
    # Look up a Color object from its raw (r, g, b) tuple.
    self.rgb2color = {c.rgb: c for c in self.colors}
    # The Finder answers nearest-color queries over the raw tuples.
    self.finder = Finder(map(lambda c: c.rgb, self.colors))
class Colorset(object):
    """The set of all colors representable with `bits` bits per channel,
    supporting nearest-color queries. (Python 2: xrange, map.)"""

    def __init__(self, bits):
        self.bits = bits
        max_color = int(math.pow(2, bits))
        # list of Color objects in this set, one per (r, g, b) combination
        self.colors = []
        for r in xrange(0, max_color):
            for g in xrange(0, max_color):
                for b in xrange(0, max_color):
                    color = Color(r, g, b, bits)
                    self.colors.append(color)
        random.shuffle(self.colors)
        # mapping from the actual (r,g,b) tuple of a color to its object
        self.rgb2color = {x.rgb: x for x in self.colors}
        rgbs = map(lambda x: x.rgb, self.colors)
        # does the work of finding the nearest color for a query
        self.finder = Finder(rgbs)

    # Return the color "closest" to the given Color in (naive) euclidean
    # space. NOTE(review): the original comment claimed the color is
    # removed from the set, but no removal happens here — confirm whether
    # Finder.find_nearest removes it internally.
    def get_nearest(self, desired):
        if desired.bits != self.bits:
            raise Exception('wrong number of bits')
        res = self.finder.find_nearest(desired.rgb)
        return self.rgb2color[res]

    # Yields each color in the set. Does not remove them.
    def iterate(self):
        for (r, g, b) in self.finder.iterate():
            yield Color(r, g, b, self.bits)

    # Number of colors currently held by the finder.
    def size(self):
        return self.finder.size()
def crawl(url):
    """Fetch *url* once, record its outbound links and extracted data in
    the module-level `spider` state."""
    if url in spider.links_crawled:
        return
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urlopen(req) as urldata:
        raw = urldata.read()
    parser = Finder()
    parser.baseurl = spider.website_url
    parser.feed(html.unescape(raw.decode('utf-8')))
    parser.close()
    found_links = parser.return_links()
    # Move the URL from the pending list into the crawled list, queueing
    # whatever new links the page contained.
    spider.links_website.remove(url)
    spider.links_website.extend(found_links)
    spider.links_crawled.append(url)
    spider.data_dict[url] = parser.return_data()
class Jester():
    """Wires together the skeleton Finder, the UI, and the event pump,
    and dumps profiling stats when the UI exits. (Python 2: StringIO,
    print statement.)"""

    def __init__(self, profiler=False):
        # NOTE(review): `profiler` is accepted but never read — a
        # cProfile.Profile is created unconditionally; confirm intent.
        self.profiler = cProfile.Profile()
        # Shared mutable list: Finder fills it, UI and events read it.
        skeletons = []
        # `recorder` and `outputs` are presumably module-level — verify.
        self.jester_ui = JesterUI({"skeletons": skeletons, "recorder": recorder, "outputs": outputs})
        self.skeleton_finder = Finder(skeletons=skeletons, ui=self.jester_ui)
        self.jester_events = JesterEvents(skeletons=skeletons, recorder=recorder, outputs=outputs, ui=self.jester_ui, p=self.profiler)

    def go(self):
        # Start the background components, then block in the UI main loop.
        self.skeleton_finder.start()
        self.jester_events.start()
        self.jester_ui.go()
        save_data()
        # After the UI exits, print cumulative profiling statistics.
        s = StringIO.StringIO()
        sortby = 'cumulative'
        stats = pstats.Stats(self.profiler, stream=s).sort_stats(sortby)
        stats.print_stats()
        print s.getvalue()
def __init__(self, groupFinder, outcomeFinder, eventrateFinder, numberFinder, modelPath, jointAssignment=True, \
             useRules=False, maxTopK=2, theta=0.8):
    """ Create a new re-ranker.
        groupFinder / outcomeFinder / eventrateFinder / numberFinder =
          sub-finders whose candidate labelings are re-ranked
        modelPath = directory for model files
        jointAssignment = whether labels are assigned jointly
        useRules = whether rule-based re-ranking is applied
        maxTopK = number of top candidates considered per finder
        theta = threshold parameter used during re-ranking
    """
    Finder.__init__(self, ['group', 'outcome', 'eventrate', 'on', 'gs'])
    self.groupFinder = groupFinder
    self.outcomeFinder = outcomeFinder
    self.eventrateFinder = eventrateFinder
    self.numberFinder = numberFinder
    self.modelPath = modelPath
    self.featureIds = {}
    self.trainFolds = 5
    self.maxTopK = maxTopK
    self.jointAssignment = jointAssignment
    self.useRules = useRules
    self.theta = theta
    self.labelingWeights = []
    # Alternative weighting schemes kept for experimentation:
    # self.labelingWeights = self.poissonWeights()
    self.labelingWeights = self.linearWeights()
    # self.labelingWeights = self.exponentialWeights()
    # Log the chosen weights (Python 2 print statement).
    for i, w in enumerate(self.labelingWeights):
        print '%2d %.8f' % (i, w)
def findNeighbour(self):
    # Redraw the map, ask the user for a source and a destination type,
    # then render every matching route. (Python 2 / Tkinter.)
    self.renderMap()
    d = NeighbourDialog(self)
    type_name = d.typeTxt.get()
    src = d.source
    r = Finder.findNeighbours(src, type_name)
    print "type = "+type_name
    if len(r) > 0:
        print "GOT ROUTES"
        dlist = ""
        for i in r:
            print i
            # i[-1][2] is presumably the destination's display name in the
            # last hop of the route — verify against findNeighbours.
            dlist = dlist + str(i[-1][2])+","
            self.renderRoute(i)
        tkMessageBox.showinfo(str(len(r))+" Results Found", dlist)
    else :
        tkMessageBox.showinfo("No results","No destinations match your query")
def findNeighbour(self): self.renderMap() d = NeighbourDialog(self) type_name = d.typeTxt.get() src = d.source r = Finder.findNeighbours(src, type_name) print "type = " + type_name if len(r) > 0: print "GOT ROUTES" dlist = "" for i in r: print i dlist = dlist + str(i[-1][2]) + "," self.renderRoute(i) tkMessageBox.showinfo(str(len(r)) + " Results Found", dlist) else: tkMessageBox.showinfo("No results", "No destinations match your query")
class Test(unittest.TestCase):
    """Exercise Finder phrase searches against the fixture page test.html."""

    finder = Finder()
    html = ""
    path = "test.html"
    phrase = "Tekst"
    finder.setPhrase(phrase)
    finder.setOffset(30)
    params = ["p", "h3"]
    try:
        # `with` closes the handle even if read() raises; `fixture` avoids
        # shadowing the `file` builtin; OSError narrows the original bare
        # except to the errors open/read can actually raise.
        with open(path, "r") as fixture:
            html = fixture.read()
    except OSError:
        print("Cannot open file")
        sys.exit()

    def test_find_phrase_with_params(self):
        """Restricting the search to the `params` tags yields only those hits."""
        soup = BeautifulSoup(self.html, "html.parser")
        self.finder.setParams(self.params)
        a = self.finder.find_phrase_with_params(soup, "test")
        b = [
            'Tekst in paragraph.Tekst in heading',
            'Tekst in paragraph.Tekst in heading3'
        ]
        self.assertEqual(a, b)

    def test_find_phrase_without_params(self):
        """An unrestricted search matches across all tags."""
        soup = BeautifulSoup(self.html, "html.parser")
        a = self.finder.find_phrase_without_params(soup, "test")
        b = [
            'Tekst in heading1Tekst in paragrap',
            'Tekst in heading1Tekst in paragraph.Tekst in headin',
            'heading1Tekst in paragraph.Tekst in heading3'
        ]
        self.assertEqual(a, b)
def gather_links(page_url):
    """Return the set of links on *page_url*, or an empty set when the
    Finder cannot be constructed for it."""
    # NOTE(review): the broad except hides the failure cause entirely —
    # consider logging the exception before returning. The unused
    # `as e` binding and the scaffolding comments were removed.
    try:
        finder = Finder(page_url)
    except Exception:
        return set()
    return finder.page_links()
def find_duplicates(path, file_type, max_threads):
    """Scan *path* for files of *file_type* (using up to *max_threads*
    workers) and return the duplicates found."""
    duplicate_finder = Finder(path, file_type, max_threads)
    duplicate_finder.process_tree()
    return duplicate_finder.get_duplicated_files()
def search(win, text):
    """Run a book search for *text* across pdfdrive and allitebooks,
    streaming results (and optionally cover images) into the window.

    Guards against re-entry via win.is_searching; updates win's status
    label and progress bar throughout.
    """
    if not win.is_searching:
        win.is_searching = True
        win.label_status.config(text="Initiation ...", foreground='green')
        try:
            finder = Finder()
            finder.max_page = 2
            # Query both sources, driving the indeterminate progress bar.
            win.label_status.config(
                text="Searching in https://www.pdfdrive.com ...")
            win.progress_bar_reset(50)
            win.progress_bar.start()
            win.update()
            finder.pdfdrive(text)
            win.label_status.config(
                text="Searching in http://www.allitebooks.org ...")
            win.update()
            finder.allitebooks(text)
            win.progress_bar.stop()
            win.label_status.config(text="Loading data ...")
            win.progress_bar_reset(len(finder.result))
            win.update()
            # Each result is presumably a dict of bytes values keyed by
            # field name (TITLE, IMG, ...) — verify against Finder.
            for data in finder.result:
                win.progress_bar.step(1)
                win.label_status.config(text="Loading title ...")
                win.update()
                win.insert_text(data["TITLE"].decode(), 'title')
                win.update()
                data.pop('TITLE')
                win.insert_text('\n')
                if win.show_img:
                    win.label_status.config(text="Downloading img ...")
                    win.update()
                    # NOTE(review): `re` shadows the stdlib regex module
                    # name within this function.
                    re = requests.get(data["IMG"].decode())
                    data.pop('IMG')
                    if re.status_code == 200:
                        # This dot carries the 'title' tag so the image
                        # inserted after it is centered.
                        win.insert_text('.', 'title')
                        win.label_status.config(text="Loading img ...")
                        # Insert the image right after that text.
                        win.insert_img(data=re.content)
                        win.insert_text('\n')
                    else:
                        win.insert_text('IMG NOT FOUND', 'error')
                        win.insert_text('\n')
                        win.label_status.config(text="Loading img failed ...",
                                                foreground='red')
                        win.update()
                # Remaining fields are rendered as "key: value" lines.
                win.label_status.config(text="Loading others data ...",
                                        foreground='green')
                win.update()
                for key in data:
                    win.insert_text("%s: %s" % (key, data[key].decode()))
                    win.insert_text('\n')
                win.update()
        except requests.exceptions.ConnectionError:
            win.label_status.config(text="Connection Error", foreground="red")
        except Exception as err:
            win.label_status.config(text=str(err), foreground="red")
        else:
            win.label_status.config(text="Success")
        win.progress_bar.stop()
        win.update()
        win.is_searching = False
    else:
        print("Searching is running")
def parse_post(request):
    """Extract the name from the POST body ('key=name') and return the
    picture page built for it."""
    requested_name = request.split('=')[1]
    finder = Finder(requested_name)
    return open_picture_html(finder.trade())
def setUp(self):
    # Fresh directed graph and a Finder over it for every test.
    self.g = nx.DiGraph()
    self.f = Finder(self.g)
import sys
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine
from finder import Finder


def cleanUp():
    # aboutToQuit handler. `finder` is the module-level instance bound
    # below, before the event loop runs; app_exited is presumably polled
    # by Finder to stop background work — verify in finder.py.
    finder.app_exited = True


app = QGuiApplication(sys.argv)
app.aboutToQuit.connect(cleanUp)
engine = QQmlApplicationEngine()
engine.load('UI/main.qml')
finder = Finder()
# Expose the Finder instance to QML as a property on the root object.
# NOTE(review): rootObjects()[0] raises IndexError if main.qml failed to
# load — confirm whether that hard failure is intended.
engine.rootObjects()[0].setProperty('finder', finder)
engine.quit.connect(app.quit)
sys.exit(app.exec())
def __init__(self):
    """Initialize through the Finder base class with its default state."""
    Finder.__init__(self)
def scores_for(puzzle):
    """Score every path found in the puzzle grid and return
    (score, cell_values, path) triples, sorted ascending."""
    puzzle = normalize_nums(puzzle)
    F = Finder(Grid(puzzle))
    scored_paths = [(Score.calculate_from(F.graph.path_values(p)), p)
                    for p in F.find_all_paths()]
    return sorted([(score, F.graph.path_values(path), path)
                   for score, path in scored_paths])
def __init__(self, entityTypes=None):
    """Initialize the base Finder with the given mention types.

    entityTypes = list of mention types to find; defaults to an empty
    list. A None sentinel replaces the original mutable `[]` default,
    which is shared across every call site (and across instances, if
    Finder keeps a reference to the list it receives).
    """
    Finder.__init__(self, [] if entityTypes is None else entityTypes)
def __init__(self, entityType1, entityType2, useLabels=True):
    """ create a new mention-quantity associator given a specific
        mention type and quantity type.
        entityType1 = the mention type to associate
        entityType2 = the quantity type to associate
        useLabels = whether labels are used when associating
    """
    Finder.__init__(self, [entityType1, entityType2])
    self.finderType = 'associator'
    self.useLabels = useLabels