def print_search_results(results, min_i, max_i):
    """Prints the formatted results from a search of posts

    Args:
        results ([results row]): The list of search result rows
        min_i (int): The minimum index of the printed results range (inclusive)
        max_i (int): The maximum index of the printed results range (exclusive)
    """
    # Truncate the wide text columns: title (col 2) and body (col 3)
    trunc = {2: 20, 3: 30}
    header = ["i", "pid", "pdate", "title", "body", "poster",
              "# keywords", "votes", "answers"]
    # Indices shown to the user start at 1
    table, widths = get_table_info(results[min_i:max_i], header,
                                   trunc_widths=trunc,
                                   index_start=min_i + 1)

    # Right-aligned index, five left-aligned text columns, then three
    # right-aligned numeric columns
    pieces = ["{{:>{}}}"] + ["{{:{}}}"] * 5 + ["{{:>{}}}"] * 3
    width_str = " ".join(pieces).format(*widths)

    print_table(table, width_str, widths)
    print("")
def test_monte_carlo_localization():
    # TODO: Add tests for random motion/inaccurate sensors
    # Fixed seed so the particle filter is deterministic for the assert below
    random.seed('aima-python')
    # Occupancy grid (11 rows x 17 cols): 1 = obstacle, 0 = free cell
    m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])

    def P_motion_sample(kin_state, v, w):
        """Sample from possible kinematic states.
        Returns from a single element distribution (no uncertainty in motion)"""
        pos = kin_state[:2]
        orient = kin_state[2]

        # for simplicity the robot first rotates and then moves
        orient = (orient + w) % 4
        for _ in range(orient):
            # rotate the velocity vector 90 degrees per orientation step
            v = (v[1], -v[0])
        pos = vector_add(pos, v)
        return pos + (orient,)

    def P_sensor(x, y):
        """Conditional probability for sensor reading"""
        # Need not be exact probability. Can use a scaled value.
        if x == y:
            return 0.8
        elif abs(x - y) <= 2:
            return 0.05
        else:
            return 0

    from utils import print_table
    # First update: stationary robot (v = (0, 0)), readings z, 1000 particles
    a = {'v': (0, 0), 'w': 0}
    z = (2, 4, 1, 6)
    S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
    # Histogram the particles' (x, y) positions over the map
    grid = [[0] * 17 for _ in range(11)]
    for x, y, _ in S:
        if 0 <= x < 11 and 0 <= y < 17:
            grid[x][y] += 1
    print("GRID:")
    print_table(grid)

    # Second update: velocity (0, 1) with new readings, reusing sample set S
    a = {'v': (0, 1), 'w': 0}
    z = (2, 3, 5, 7)
    S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
    grid = [[0] * 17 for _ in range(11)]
    for x, y, _ in S:
        if 0 <= x < 11 and 0 <= y < 17:
            grid[x][y] += 1
    print("GRID:")
    print_table(grid)
    # Most of the 1000 particles should have converged on cell (6, 7)
    assert grid[6][7] > 700
def print_search_results(results, min_i, max_i, answer=False, accepted_id=None):
    """Prints the formatted results from a search of posts

    Args:
        results ([results row]): The list of search result rows
        min_i (int): The minimum index of the printed results range (inclusive)
        max_i (int): The maximum index of the printed results range (exclusive)
        answer(bool): Whether the results are answers or not
        accepted_id: If truthy, the first data row is marked with "*"
    """
    if answer:
        # Answers: truncate only the body column (col 0)
        trunc_widths = {0: 80}
        header = ["Index", "Body", "CreationDate", "Score"]
    else:
        # Questions: truncate only the title column (col 1)
        trunc_widths = {1: 30}
        header = ["Index", "Title", "CreationDate", "Score", "AnswerCount"]

    # Displayed indices start at 1
    table, widths = get_table_info(results[min_i:max_i], header,
                                   trunc_widths=trunc_widths,
                                   index_start=min_i + 1,
                                   answer=answer)

    # Prefix the first data row's index with "*" to flag the accepted answer
    if accepted_id:
        table[1][0] = "*" + table[1][0]

    # One aligned column per header entry
    width_str = ("{{:{}}} " * len(header)).format(*widths)

    print_table(table, width_str, widths)
    print("")
def do_ls(self, ignored):
    """ List all available instances """
    rows = [('Instance ID', 'Instance Name')]
    # One row per instance: id plus its "Name" tag (blank when untagged)
    rows.extend((inst.id, inst.tags.get('Name', ''))
                for inst in self._for_all_instances())
    print_table(rows)
def run_store(self):
    """Run the interactive store menu loop until the customer exits.

    Each iteration reads a numeric option from stdin and dispatches to the
    matching action (list storage, show cart, add/remove order, pay, exit).
    Any error aborts the loop and is printed with its type.
    """
    exit_requested = False
    shopping_cart = [['health-potion', 3]]
    try:
        while not exit_requested:
            print(self._OPTIONS)
            customer_option = int(input("Please choose your option: "))
            print()
            # Fix: compare ints with ==, not `is` — identity of small ints
            # is an interpreter detail and warns on Python 3.8+
            if customer_option == 1:
                print_table(self._STORAGE,
                            headers=['ID', 'Item Name', 'Price'])
            elif customer_option == 2:
                print_table(shopping_cart,
                            headers=['ID', 'Item Name', 'Quantity'])
                total = calculate_total_price(shopping_cart, self._STORAGE)
                print(f'Total: {total} G')
            elif customer_option == 3:
                order = [input("Item name: "), int(input("Quantity: "))]
                add_order_to_cart(order, shopping_cart, self._STORAGE)
            elif customer_option == 4:
                remove_order_from_cart(shopping_cart)
            elif customer_option == 5:
                print("""
                TODO:
                + Add items to player's inventory
                + Minus player's gold
                """)
                print(
                    f"You've paid {calculate_total_price(shopping_cart, self._STORAGE)}G !"
                )
                # Payment empties the cart
                shopping_cart = []
            elif customer_option == 6:
                exit_requested = True
    except Exception as e:
        print(f'Error - DragonStore: {type(e)} {e}')
def rsync_fb_conf():
    """Synchronise the local cluster conf directory to every master host.

    Hosts that resolve to a local address are skipped (they already hold the
    conf). Per-host outcomes (OK / SSH ERROR / NO CLUSTER) are collected and
    printed as a table at the end.
    """
    logger.info('Sync conf...')
    cluster_id = config.get_cur_cluster_id()
    if not validate_id(cluster_id):
        logger.warn('Invalid cluster id: {}'.format(cluster_id))
        return
    if cluster_id not in get_cluster_list():
        logger.warn('Cluster not exist: {}'.format(cluster_id))
        return
    my_address = config.get_local_ip_list()
    # Fix: resolve all needed paths with a single lookup (the original
    # called config.get_path_of_fb(cluster_id) twice)
    path_of_fb = config.get_path_of_fb(cluster_id)
    props_path = path_of_fb['redis_properties']
    conf_path = path_of_fb['conf_path']
    cluster_path = path_of_fb['cluster_path']
    key = 'sr2_redis_master_hosts'
    nodes = config.get_props(props_path, key, [])
    meta = [['HOST', 'RESULT']]
    for node in nodes:
        if net.get_ip(node) in my_address:
            # Local host: nothing to copy
            meta.append([node, color.green('OK')])
            continue
        client = net.get_ssh(node)
        if not client:
            meta.append([node, color.red('SSH ERROR')])
            continue
        if not net.is_dir(client, cluster_path):
            meta.append([node, color.red('NO CLUSTER')])
            continue
        net.copy_dir_to_remote(client, conf_path, conf_path)
        meta.append([node, color.green('OK')])
    utils.print_table(meta)
def compare(algorithms=None, datasets=None, k=10, trials=1):
    """Compare various learners on various datasets using cross-validation.
    Print results as a table."""
    # Fall back to the default suites when nothing (or an empty list) is given
    algorithms = algorithms or [PluralityLearner, NaiveBayesLearner,
                                NearestNeighborLearner, DecisionTreeLearner]
    datasets = datasets or [iris, orings, zoo, restaurant,
                            SyntheticRestaurant(20), Majority(7, 100),
                            Parity(7, 100), Xor(100)]

    rows = []
    for alg in algorithms:
        scores = [cross_validation(alg, d, k, trials) for d in datasets]
        rows.append([alg.__name__.replace('Learner', '')] + scores)
    print_table(rows,
                header=[''] + [d.name[0:7] for d in datasets],
                numfmt='%.2f')
def _show_corpus(self):
    """Pretty-print the internal path list (one basename per signal key)."""
    # TODO: show the vectors in the last column
    # Fix: the original wrapped everything in a bare `except:` that turned
    # ANY failure (including bugs in print_table) into the same ValueError.
    # Test the actual precondition — an empty signal mapping — explicitly.
    if not self._signals:
        raise ValueError("no signal entries exist yet!?...add some first")
    # Iterating the dict yields its keys; .keys() was redundant
    print_table(map(os.path.basename, self._signals))
def test_monte_carlo_localization():
    # TODO: Add tests for random motion/inaccurate sensors
    # Fixed seed so the particle filter is deterministic for the assert below
    random.seed('aima-python')
    # Occupancy grid (11 rows x 17 cols): 1 = obstacle, 0 = free cell
    m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])

    def P_motion_sample(kin_state, v, w):
        """Sample from possible kinematic states.
        Returns from a single element distribution (no uncertainty in motion)"""
        pos = kin_state[:2]
        orient = kin_state[2]

        # for simplicity the robot first rotates and then moves
        orient = (orient + w)%4
        for _ in range(orient):
            # rotate the velocity vector 90 degrees per orientation step
            v = (v[1], -v[0])
        pos = vector_add(pos, v)
        return pos + (orient,)

    def P_sensor(x, y):
        """Conditional probability for sensor reading"""
        # Need not be exact probability. Can use a scaled value.
        if x == y:
            return 0.8
        elif abs(x - y) <= 2:
            return 0.05
        else:
            return 0

    from utils import print_table
    # First update: stationary robot (v = (0, 0)), readings z, 1000 particles
    a = {'v': (0, 0), 'w': 0}
    z = (2, 4, 1, 6)
    S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
    # Histogram the particles' (x, y) positions over the map
    grid = [[0]*17 for _ in range(11)]
    for x, y, _ in S:
        if 0 <= x < 11 and 0 <= y < 17:
            grid[x][y] += 1
    print("GRID:")
    print_table(grid)

    # Second update: velocity (0, 1) with new readings, reusing sample set S
    a = {'v': (0, 1), 'w': 0}
    z = (2, 3, 5, 7)
    S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
    grid = [[0]*17 for _ in range(11)]
    for x, y, _ in S:
        if 0 <= x < 11 and 0 <= y < 17:
            grid[x][y] += 1
    print("GRID:")
    print_table(grid)
    # Most of the 1000 particles should have converged on cell (6, 7)
    assert grid[6][7] > 700
def main():
    """Run the store's interactive menu loop until the customer exits.

    Reads a numeric option from stdin each iteration and dispatches to the
    matching action. Errors abort the loop and are printed with their type.
    """
    exit_requested = False
    shopping_cart = [['health-potion', 3]]
    try:
        while not exit_requested:
            print(OPTIONS)
            customer_option = int(input("Please choose your option: "))
            print()
            # Fix: compare ints with ==, not `is` — identity of small ints
            # is an interpreter detail and warns on Python 3.8+
            if customer_option == 1:
                print_table(STORAGE, headers=['ID', 'Item Name', 'Price'])
            elif customer_option == 2:
                print_table(shopping_cart,
                            headers=['ID', 'Item Name', 'Quantity'])
                total = calculate_total_price(shopping_cart, STORAGE)
                print(f'Total: {total}')
            elif customer_option == 3:
                product = [input("Item name: "), int(input("Quantity: "))]
                add_product_to_cart(product, shopping_cart, STORAGE)
            elif customer_option == 4:
                print("4")  # placeholder: remove-from-cart not implemented
            elif customer_option == 5:
                print("5")  # placeholder: checkout not implemented
            elif customer_option == 6:
                exit_requested = True
    except Exception as e:
        print(f'Error - DragonStore: {type(e)} {e}')
def print_help():
    """Print the project's full help: options, running modes, extra modes,
    wordlists and password generators, then exit with status 0."""
    # Print project's help table
    print_fast_help()

    def rows(pairs):
        # Pad the first column to 14 chars, matching the table layout
        return [["%-14s" % left, right] for left, right in pairs]

    print("Options: ")
    utils.print_table(("Formats", "Examples"), *rows([
        ("-u <file_path>", "-u /opt/wordlists/nmap.lst"),
        ("-p <file_path>", "-p /opt/wordlists/passwd.txt"),
        ("-U <username>", "-U admin | -U admin:user1"),
        ("-t <threads>", "-t 32"),
        # ("-T <timeout>", "-t 25"),
        ("-l <file_path>", "-l url_list.txt"),
    ]))

    print("\nRunning mode:")
    utils.print_table(("Modes", "Descriptions"), *rows([
        ("--proxy", "Attack using proxies"),
        ("--verbose", "Display running information"),
    ]))

    print("\nExtra modes: Combines with attack mode")
    utils.print_table(("Modes", "Descriptions"), *rows([
        ("--reauth", "Check credentials on social networks"),
        ("--getproxy", "Provide new proxy list"),
        ("--upwd", "Add username to passlist"),
    ]))

    print("\nWordlists: Values will be replaced by [-U/-u/-p] options")
    utils.print_table(("List name", "Descriptions"), *rows([
        ("default", "Top common users+passwords"),
        ("router", "Router wordlist"),
        ("tomcat", "Tomcat manager wordlist"),
        ("cctv", "CCTV wordlist"),
        ("unix", "Top Unix wordlist"),
        ("http", "Top HTTP wordlist"),
        ("mirai", "Mirai botnet wordlist"),
        ("webshell", "Webshell wordlist"),
        ("sqli", "Dynamic SQLi payloads"),
    ]))
    print("")

    print("\nPassword generators: Generate password from text or keywords")
    # NOTE: this section's first column is unpadded in the original layout
    utils.print_table(("Values", "Descriptions"),
                      ["--toggle_case",
                       "Replace letter by its uppercase and lowercase"],
                      ["--replacement",
                       "Replace letter by special characters"])
    print("")
    sys.exit(0)
def test_score():
    """Build, fill and score the DP table for a fixed sequence pair."""
    table = dp_table.DPTable("GCCCT", "GCGCA")
    table.build_table()
    table.base_cases()
    table.fill_matrix()
    table.get_score()
    utils.print_table(table.table)
    print(table.score)
def print_reward_comparison(mdp, pi, expert_mdp, expert_trace):
    """Print the learned policy beside the expert trace, the policy
    difference, and both reward tables.

    Args:
        mdp: the MDP whose learned policy `pi` is shown
        pi: learned policy (state -> action)
        expert_mdp: MDP holding the expert's rewards
        expert_trace: expert policy to compare against
    """
    utils.print_table(mdp.to_arrows(pi))
    # Fix: use print() calls throughout — the original mixed Python-2-only
    # `print "vs"` statements with print(...) calls in the same function;
    # the call form behaves identically on both interpreters here
    print("vs")
    utils.print_table(mdp.to_arrows(expert_trace))
    print("Policy difference is " + str(get_policy_difference(pi, expert_trace)))
    mdp.print_rewards()
    print("vs")
    expert_mdp.print_rewards()
def test_backtrack():
    """Fill the DP table for a fixed pair, print it, then print the
    backtracked alignment strings."""
    table = dp_table.DPTable("GCCCT", "GCGCA")
    table.build_table()
    table.base_cases()
    table.fill_matrix()
    utils.print_table(table.table)
    table.backtrack()
    print(table.aligned1)
    print(table.aligned2)
def show_contexts(self):
    """Display a table of all contexts, sorted by name."""
    contexts = sorted(self.contexts.values(), key=lambda c: c.name)
    # Column spec: (header, width, alignment, attribute, formatter)
    struct = [
        ('context', lambda a: a, '<', 'name', lambda a: a),
        ('visibility', 10, '<', 'visibility', lambda a: a),
        ('priority', 8, '<', 'priority', str),
        ('undone tasks', 12, '<', 'population', str),
    ]
    utils.print_table(struct, contexts, 80)
def _show_platforms(self, *args, **kwargs):
    """Print the table of supported target platforms."""
    headers = ("Name", "Architecture", "Description")
    platforms = [
        ["windows", "x86 / X64", ""],
        ["linux", "x86 / x64", ""],
        # ["android", "davik", "Android mobile system"],
        # ["osx", "x86 / x64", ""]
    ]
    utils.print_table(headers, *platforms)
    print('')
def compare_searchers(problems, header,
                      searchers=None):
    """Run each searcher on each problem and print a statistics table.

    Args:
        problems: problems to solve
        header: column headers for the printed table
        searchers: search functions to compare; defaults to A* and
            depth-first tree search. Built inside the function so no
            mutable default list is shared between calls.
    """
    if searchers is None:
        searchers = [astar_search, depth_first_tree_search]

    def do(searcher, problem):
        # Wrap the problem so expansions/goal tests are counted
        p = InstrumentedProblem(problem)
        searcher(p)
        return p

    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
def compare_searchers(problems, header,
                      searchers=None):
    """Run each searcher on each problem and print a statistics table
    including the cost of the solution found.

    Args:
        problems: problems to solve
        header: column headers for the printed table
        searchers: search functions to compare; defaults to BFS, DFS and
            iterative deepening. Built inside the function so no mutable
            default list is shared between calls.
    """
    if searchers is None:
        searchers = [breadth_first_search, depth_first_search,
                     iterative_deepening_search]

    def do(searcher, problem):
        p = InstrumentedProblem(problem)
        goal_node = searcher(p)
        # Record the found solution's cost on the instrumented problem
        p.final_cost = goal_node.path_cost
        return p

    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header, "Expanded/Goal Tests/Generated/Cost/Goal Found")
def compare(algorithms=None, datasets=None, k=10, trials=1):
    """Compare various learners on various datasets using cross-validation.
    Print results as a table.

    Defaults are built inside the function: the original evaluated mutable
    default lists — and the dataset constructors such as
    SyntheticRestaurant(20) — once at import time and shared them across
    every call.
    """
    if algorithms is None:
        algorithms = [PluralityLearner, NaiveBayesLearner,
                      NearestNeighborLearner, DecisionTreeLearner]
    if datasets is None:
        datasets = [iris, orings, zoo, restaurant, SyntheticRestaurant(20),
                    Majority(7, 100), Parity(7, 100), Xor(100)]
    print_table([[a.__name__.replace('Learner', '')] +
                 [cross_validation(a, d, k, trials) for d in datasets]
                 for a in algorithms],
                header=[''] + [d.name[0:7] for d in datasets],
                numfmt='%.2f')
def compare_searchers(problems, header,
                      searchers=None):
    """Run each searcher on each problem and print a statistics table.

    Args:
        problems: problems to solve
        header: column headers for the printed table
        searchers: search functions to compare; defaults to the six
            standard uninformed/heuristic searchers. Built inside the
            function so no mutable default list is shared between calls.
    """
    if searchers is None:
        searchers = [breadth_first_tree_search, breadth_first_search,
                     depth_first_graph_search, iterative_deepening_search,
                     depth_limited_search, recursive_best_first_search]

    def do(searcher, problem):
        # Wrap the problem so expansions/goal tests are counted
        p = InstrumentedProblem(problem)
        searcher(p)
        return p

    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
def get_data(url, cookie):
    """Fetch video metadata for *url* with the given session cookie, show
    the available qualities, and optionally download the best one.

    Python 2 code (raw_input, mechanize); performs network I/O throughout.
    """
    import mechanize
    try:
        process = mechanize.Browser()
        # Authenticate via the caller-supplied cookie; ignore robots.txt
        process.addheaders = [('Cookie', cookie)]
        process.set_handle_robots(False)
        process.open(url)
        if process.geturl() == "http://ma.playboy.tv/access/login":
            # Redirected to the login page => the cookie was rejected
            utils.printf("Got login page! Check your cookie", "bad")
            sys.exit(1)
        else:
            resp = process.response().read()
            # parse_data returns a JSON string (fed straight to json.loads);
            # presumably a mapping of format name -> video URL — the lookups
            # below rely on that shape
            link_info = json.loads(parse_data(resp))
            utils.print_table(("Quality", "Video URL"), *link_info.items())
            quality, best_format = best_video_quality(link_info)
            dload_link = link_info[best_format]
            utils.printf(dload_link, "good")
            utils.printf("Do you want to save this video? [Y]")
            option = raw_input()
            if option == "Y" or option == "y":
                # Build the filename from the last URL path segment, the
                # quality label, and the extension scraped from the link
                if url[-1] == "/":
                    dload_name = "%s_%s.%s" % (
                        url.split('/')[-2], quality,
                        re.findall("%s.(.*)\?" % (best_format), dload_link,
                                   re.MULTILINE)[0])
                else:
                    dload_name = "%s_%s.%s" % (
                        url.split('/')[-1], quality,
                        re.findall("%s.(.*)\?" % (best_format), dload_link,
                                   re.MULTILINE)[0])
                utils.printf("Saving to %s" % (dload_name), 'good')
                utils.printf("Downloading, please wait....")
                try:
                    process.retrieve(dload_link, dload_name)
                    utils.printf("Download completed!", "good")
                except:
                    # NOTE(review): bare except hides the real download error
                    utils.printf("Error while downloading file", "bad")
            else:
                utils.printf("You can download your video manually")
    except KeyboardInterrupt:
        utils.printf("Terminated by user!", "bad")
        sys.exit(1)
    except:
        # NOTE(review): bare except — any bug above is reported as this
        # generic message; consider narrowing
        utils.printf("Error while getting data", "bad")
        sys.exit(0)
    finally:
        # NOTE(review): if mechanize.Browser() itself raised, `process` is
        # unbound here and this line raises NameError
        process.close()
def get_attr(self, instance_id, attr):
    """Print attribute *attr* for one instance (when *instance_id* is
    given) or for every instance as a table. Python 2 code."""
    instances = list(self._for_all_instances())
    instance_ids = [ instance.id for instance in instances ]
    if instance_id and (instance_id not in instance_ids):
        error("Invalid id: %s" % instance_id)
    elif instance_id:
        # Known id: print just the requested attribute's value
        instance = instances[ instance_ids.index(instance_id) ]
        print getattr(instance, attr)
    else:
        # No id given: tabulate the attribute for all instances
        data = [('Instance ID', attr)]
        for instance_id, instance in zip(instance_ids, instances):
            data.append((instance_id, getattr(instance, attr)))
        print_table(data)
def run(options, creds):
    """Try each credential pair against every social site and report any
    other valid passwords found.

    Args:
        options: parsed CLI options (at least `url`)
        creds: iterable of credential pairs to re-test
    """
    # Fix: build a filtered list instead of calling remove() while
    # iterating — mutating a list during iteration skips the element that
    # follows each removal, so some matching URLs were kept
    social_urls = [url
                   for url in data.social_urls().replace("\t", "").split("\n")
                   if options.url not in url]
    result = Queue()
    try:
        for tryCreds in creds:
            for url in social_urls:
                submit(url, options, tryCreds, result)
    except KeyboardInterrupt:
        utils.printf("[!] Terminated by user! WhAt ThE f**K!", "bad")
        import os
        os._exit(0)
    except SystemExit:
        utils.die("[!] Terminated by system! WhAt ThE f**K!", "SystemExit")
    except Exception as err:
        utils.die("[!] ReAuth: Runtime error", err)
    finally:
        # Drain the queue and report whatever was collected, even on error
        result = list(result.queue)
        if len(result) == 0:
            utils.printf("[-] No other valid passwords found.. :(", "bad")
        else:
            utils.print_table(("Target", "Username", "Password"), *result)
def run(options, creds):
    """Try each credential pair against every social site and report any
    valid accounts found.

    Args:
        options: parsed CLI options (at least `url`)
        creds: iterable of credential pairs to re-test
    """
    # Fix: build a filtered list instead of calling remove() while
    # iterating — mutating a list during iteration skips the element that
    # follows each removal, so some matching URLs were kept
    social_urls = [url
                   for url in data.social_urls().replace("\t", "").split("\n")
                   if options.url not in url]
    result = Queue()
    try:
        for tryCreds in creds:
            for url in social_urls:
                submit(url, options, tryCreds, result)
    except KeyboardInterrupt:
        events.error("Terminated by user", "STOPPED")
        sys.exit(1)
    except SystemExit:
        events.error("Terminated by system", "STOPPED")
    except Exception as error:
        events.error("%s" % (error), "REAUTH")
        sys.exit(1)
    finally:
        # Drain the queue and report whatever was collected, even on error
        result = list(result.queue)
        if len(result) == 0:
            events.error("No valid account found", "RESULT")
        else:
            from utils import print_table
            print_table(("Target", "Username", "Password"), *result)
def compare_searchers(problems, header,
                      searchers=None):
    """Run each searcher on each problem and print a statistics table;
    each cell is an (InstrumentedProblem, elapsed-seconds-string) pair.

    Args:
        problems: problems to solve
        header: column headers for the printed table
        searchers: search functions to compare; defaults to the six
            standard searchers. Built inside the function so no mutable
            default list is shared between calls.
    """
    if searchers is None:
        searchers = [breadth_first_tree_search, breadth_first_search,
                     depth_first_graph_search, iterative_deepening_search,
                     depth_limited_search, recursive_best_first_search]

    def do(searcher, problem):
        p = InstrumentedProblem(problem)
        start_time = time.perf_counter_ns()
        searcher(p)
        end_time = time.perf_counter_ns()
        # Convert nanoseconds to seconds for display
        return (p, str((end_time - start_time) / 1000000000))

    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
def SGD_train_output_table(self, train_datas, test_datas, epoch=5, step=5):
    """Train with SGD for `epoch` epochs, evaluating and printing the
    accumulated results every `step` epochs. Returns the result list."""
    results = []
    start = time.time()
    print(f'Start train, epoch = {epoch}')
    for i in range(epoch):
        print(f'Start epoch{i}')
        # Train/evaluate in bursts every `step` epochs (i + 1 replaces the
        # original running counter, which always equalled it)
        if (i + 1) % step == 0:
            self.SGD_train(train_datas, step)
            results.append(self.test(test_datas))
            U.print_table(results, step)
    end = time.time()
    # self.test(test_datas, True)  # print test data
    print(f'Epoch count: {epoch}, Train time: {end - start} seconds')
    return results
def _show_evasions(self, *args, **kwargs):
    """Print the table of available evasion modules."""
    headers = ("Name", "Platform", "Description")
    evasions = [
        ["MHA", "Windows / Linux",
         "[PhantomEvasion] Multipath HeapAlloc (C)"],
        ["MVA", "Windows",
         "[PhantomEvasion] Multipath VirtualAlloc (C)"],
        ["Polymorphic_MHA", "Windows / Linux",
         "[PhantomEvasion] Polymorphic HeapAlloc (C)"],
        ["Polymorphic_MVA", "Windows",
         "[PhantomEvasion] Polymorphic VirtualAlloc (C)"],
    ]
    utils.print_table(headers, *evasions)
    print('')
def solve(p):
    """Print the puzzle's metadata, then every answer word with its score
    (pangrams earn a 7-point bonus)."""
    print('letters:', p.get('letters', None))
    print('total_score:', p.get('total_score', ''))
    print('word_count:', p.get('word_count', ''))
    print('pangram(s):', ', '.join(p.get('pangram_list', [])))
    print()

    # print all answers
    for entry in p.get('word_list', []):
        word = entry.get('word')
        score = entry.get('score')
        if word in p.get('pangram_list', []):
            # pangram bonus
            score += 7
        utils.print_table((word, score), 2, 10)
    return
def print_badges(results, min_i, max_i):
    """Prints the formatted results from a list of badges

    Args:
        results ([badge row]): The list of badge rows
        min_i (int): The minimum index of the printed results range (inclusive)
        max_i (int): The maximum index of the printed results range (exclusive)
    """
    header = ["i", "name", "type"]
    # Displayed indices start at 1
    table, widths = get_table_info(results[min_i:max_i], header,
                                   index_start=min_i + 1)
    # Right-aligned index followed by two left-aligned columns
    fmt = "{{:>{}}} {{:{}}} {{:{}}}".format(*widths)
    print_table(table, fmt, widths)
    print("")
def max_profits(k, prices):
    """Set up and zero-initialise the (k+1) x len(prices) DP table, printing
    it before and after initialisation.

    NOTE(review): the profit recurrence itself is not implemented — the
    function currently only builds the base cases and returns None.
    """
    num_rows = k + 1  # extra row so row 0 can hold the zero-transaction base
    num_cols = len(prices)
    t = [[None] * num_cols for _ in range(num_rows)]
    print_table(t)
    # Base cases: first row and first column are zero
    for col in range(num_cols):
        t[0][col] = 0
    for row in range(num_rows):
        t[row][0] = 0
    print('')
    print_table(t)
def do_status(self, lb_name):
    """ Shows the status of all the instances in the load balancer """
    # XXX get the ec2 context for the cache, if it exits. This is needed to
    # get the instance names.
    from botosh.aws_admin import _context_cache
    from botosh import available_contexts
    if 'ec2' in _context_cache:
        ec2_context = _context_cache['ec2']
    else:
        ec2_context = available_contexts['ec2']()
    ec2_conn = ec2_context.conn
    # Fall back to the currently-connected load balancer when none is given
    lb_name = lb_name or self.connected_to
    data = [("Instance Name", "Instance Id", "Status")]
    for state in self.conn.describe_instance_health(lb_name):
        for reservation in ec2_conn.get_all_instances([state.instance_id]):
            for instance in reservation.instances:
                data.append((instance.tags.get('Name', ''),
                             state.instance_id,
                             state.state))
    print_table(data)
def print_board(self):
    """ Print the board. """
    # No-op in silent mode
    if self.silent:
        return
    if self.matrix is None:
        # First call: cache the static layout and start the move counter
        self.move_cnt = 0
        self.matrix = get_static_board_layout(self.things, self.width,
                                              self.height)
    #else:
    #    print chr(27) + "[2J"
    print 'Move {}'.format(self.move_cnt)
    self.move_cnt += 1
    agent = self.agents[0]
    xloc, yloc = agent.location
    try:
        # Place the agent in the cached matrix; the row index is flipped
        # (len(matrix) - yloc), so y presumably grows upward — confirm
        self.matrix[len(self.matrix) - yloc][xloc - 1] = agent
    except:
        # NOTE(review): bare except used as a debug aid for out-of-range
        # agent locations — consider catching IndexError only
        print self.height, yloc, xloc
    print_table(self.matrix, sep='')
    # Restore a placeholder so the agent object isn't left in the cache
    self.matrix[len(self.matrix) - yloc][xloc - 1] = '@'
    print ''
def main():
    """Run apriori over the configured transactions and print the
    CMB / 1-star / 2-star itemset tables with timing information."""
    global MIN_SUPPORT_VALUE
    # preprocessing
    config_data = get_config_info()
    MIN_SUPPORT_VALUE = int(config_data['min_support'])
    transactions = get_transactions(config_data['filename'])
    location_time_star_items = get_location_time_star_items(transactions)
    # base itemset
    two_items_itemsets = get_two_items_itemsets(transactions)

    # run apriori on the base itemset, timing it
    started = datetime.datetime.now()
    final_itemsets = get_final_itemsets(two_items_itemsets, transactions)
    star_itemsets = get_star_itemsets(final_itemsets,
                                      location_time_star_items)
    zero_star = star_itemsets[0]
    one_star = star_itemsets[1]
    two_star = star_itemsets[2]
    finished = datetime.datetime.now()
    elapsed = finished - started

    print('*' * 20)
    print('Total time taken in (microseconds) by apriori algorithm:',
          elapsed.microseconds)
    print('Min support value:', MIN_SUPPORT_VALUE)
    print('*' * 20)
    print_table(zero_star, 'Itemsets for CMB')
    print_table(one_star, 'Itemsets for 1 star CMP')
    print_table(two_star, 'Itemsets for 2 star')
def knapsack(items, capacity):
    """0/1 knapsack via dynamic programming.

    Args:
        items: sequence of (weight, value) pairs
        capacity: maximum total weight
    Returns:
        The best achievable total value; also prints debug state and the
        final DP table.
    """
    num_rows = len(items) + 1
    num_cols = capacity + 1
    t = [[None] * num_cols for _ in range(num_rows)]
    # Base cases: zero items (row 0) or zero capacity (col 0) => value 0
    for i in range(num_rows):
        t[i][0] = 0
    for j in range(num_cols):
        t[0][j] = 0
    print('items: ', items)
    for i in range(1, num_rows):
        value = items[i - 1][1]
        weight = items[i - 1][0]
        print('value: ', value)
        print('weight: ', weight)
        for j in range(1, num_cols):
            if j >= weight:
                # Take item i (gain `value`, spend `weight` capacity) or
                # skip it — keep whichever is better
                t[i][j] = max(value + t[i - 1][j - weight], t[i - 1][j])
            else:
                # Item too heavy for this capacity: carry previous best
                t[i][j] = t[i - 1][j]
    print_table(t)
    return t[num_rows - 1][num_cols - 1]
def main():
    """Run FP-tree mining over the configured transactions and print the
    CMB / 1-star / 2-star itemset tables with timing information."""
    config_data = get_config_info()
    min_support = int(config_data['min_support'])
    transactions = get_preprocessed_data(config_data['filename'])
    location_time_star_items = get_location_time_star_items(transactions)

    started = datetime.datetime.now()
    final_itemsets = find_frequent_patterns_by_location_time(
        transactions, min_support)
    star_itemsets = get_star_itemsets(final_itemsets,
                                      location_time_star_items)
    zero_star = star_itemsets[0]
    one_star = star_itemsets[1]
    two_star = star_itemsets[2]
    finished = datetime.datetime.now()
    elapsed = finished - started

    print('*' * 20)
    print('Total time taken in (microseconds) by fptree algorithm:',
          elapsed.microseconds)
    print('Min support value:', min_support)
    print('*' * 20)
    print_table(zero_star, 'Itemsets for CMB')
    print_table(one_star, 'Itemsets for 1 star CMP')
    print_table(two_star, 'Itemsets for 2 star')
def main():
    """Run the Hash Based Spatio-Temporal (HBST) apriori variant and print
    the itemset tables with timing information."""
    global MIN_SUPPORT_VALUE
    # preprocessing
    config_data = get_config_info()
    MIN_SUPPORT_VALUE = int(config_data['min_support'])
    transactions = get_transactions(config_data['filename'])
    location_time_star_items = get_location_time_star_items(transactions)
    hash_ids = get_hash_ids(location_time_star_items)
    rev_hash_ids = get_rev_hash_ids(hash_ids)
    itemsets_by_hash_id = get_itemsets_by_hash_id(transactions, hash_ids)
    # base itemset
    two_items_itemsets_by_hash_id = get_two_items_itemsets_by_hash_id(
        itemsets_by_hash_id)

    # run the spatio-temporal apriori on the base itemset, timing it
    started = datetime.datetime.now()
    final_itemsets_by_hash_id = get_final_itemsets_by_hash_id(
        two_items_itemsets_by_hash_id, itemsets_by_hash_id)
    star_itemsets_by_hash_id = get_star_itemsets_by_hash_id(
        final_itemsets_by_hash_id, location_time_star_items, hash_ids,
        rev_hash_ids)
    one_star = star_itemsets_by_hash_id[0]
    two_star = star_itemsets_by_hash_id[1]
    finished = datetime.datetime.now()
    elapsed = finished - started

    print('*' * 20)
    print(
        'Total time taken in (microseconds) by Hash Based Spatio-Temporal(HBST) algorithm:',
        elapsed.microseconds)
    print('Min support value:', MIN_SUPPORT_VALUE)
    print('*' * 20)
    print_table(final_itemsets_by_hash_id, rev_hash_ids, 'Itemsets for CMB')
    print_table(one_star, rev_hash_ids, 'Itemsets for 1 star CMP')
    print_table(two_star, rev_hash_ids, 'Itemsets for 2 star')
# Get list of test cases (one per .ufl file in the current directory)
test_cases = sorted(f.split(".")[0] for f in glob.glob("*.ufl"))

# Fix: use context managers so the log and output files are always closed
# (the original leaked a handle per test case and could lose buffered log
# data on an exception). bench.log is opened BEFORE chdir, preserving its
# original location relative to the starting directory.
table = {}
with open("bench.log", "w") as logfile:
    # Iterate over options
    os.chdir("../test/regression")
    for (j, test_option) in enumerate(test_options):
        # Run benchmark
        print("\nUsing options %s\n" % test_option)
        os.system(sys.executable + " test.py --bench %s" % test_option)

        # Collect results
        for (i, test_case) in enumerate(test_cases):
            with open("output/%s.out" % test_case) as out:
                output = out.read()
            lines = [line for line in output.split("\n") if "bench" in line]
            if not len(lines) == 1:
                raise RuntimeError("Unable to extract benchmark data for test case %s" % test_case)
            timing = float(lines[0].split(":")[-1])
            table[(i, j)] = (test_case, test_option, timing)
            logfile.write("%s, %s, %g\n" % (test_case, test_option, timing))

# Print results
print_table(table, "FFC bench")
def show_history(self):
    """Print the task history table, sized to the current terminal."""
    term_width = shutil.get_terminal_size().columns
    id_width = max(2, self.id_width) + 1
    # Wide terminals get the extended history layout
    wide = term_width > WIDE_HIST_THRESHOLD
    struct = utils.get_history_struct(id_width, wide)
    utils.print_table(struct, self.tasks, term_width)