def getPorts():
    """Print the number of distinct attacked ports plus a per-port chart.

    With ``verbose``/``veryVerbose`` every port is charted; otherwise the
    query is limited to the ten most-attacked ports.  Results of the mongo
    shell aggregation are parsed line-by-line into the module-level
    ``countByPort`` dict.  Uses module globals: verbose, veryVerbose,
    countByPort, executeQuery.
    """
    print("Distinct ports attacked: " +
          executeQuery("db.session.distinct('destination_port').length"))
    if verbose or veryVerbose:
        limit_stage, title = "", 'Ports'
    else:
        # Only chart the ten most-attacked ports.
        limit_stage, title = r",{\$limit:10}", 'Ports ( Top 10 )'
    # The backslash-dollar escapes are intentional and preserved verbatim:
    # the query text is handed to the mongo shell via executeQuery(), where
    # a bare $ could otherwise be mangled along the way.
    query = (r"db.session.aggregate({\$group:{_id:'\$destination_port',"
             r"'count':{\$sum:1}}},{\$sort:{count:-1}}" + limit_stage +
             r").forEach(function(x){printjson(x)})")
    for pair in executeQuery(query).split('\n'):
        match = re.search(r'"_id" : (\d+), "count" : (\d+) }', pair)
        if match:
            countByPort[match.group(1)] = int(match.group(2))
    print(figlet_format(title, font='small'))
    graph = Pyasciigraph()
    for line in graph.graph('', sorted(countByPort.items(),
                                       key=operator.itemgetter(1),
                                       reverse=True)):
        print(line)
    print("")
def output(self, args, begin_ns, end_ns, final=0):
    """Print per-TID page allocation/free charts plus overall totals."""
    limit = args.top
    graph = Pyasciigraph()
    print('%s to %s' % (ns_to_asctime(begin_ns), ns_to_asctime(end_ns)))

    def top_tids(attr):
        # Collect (label, value) rows for the `limit` largest tids by attr.
        rows = []
        ordered = sorted(self.state.tids.values(),
                         key=operator.attrgetter(attr), reverse=True)
        for n, tid in enumerate(ordered, 1):
            rows.append(("%s (%d)" % (tid.comm, tid.tid),
                         getattr(tid, attr)))
            if 0 < limit <= n:
                break
        return rows

    for line in graph.graph("Per-TID Memory Allocations",
                            top_tids('allocated_pages'), unit=" pages"):
        print(line)
    for line in graph.graph("Per-TID Memory Deallocation",
                            top_tids('freed_pages'), unit=" pages"):
        print(line)
    print("\nTotal memory usage:\n- %d pages allocated\n- %d pages freed" %
          (self.state.mm["allocated_pages"], self.state.mm["freed_pages"]))
def output(self, args, begin_ns, end_ns, final=0):
    """Report memory allocations and deallocations per TID, then totals."""
    limit = args.top
    graph = Pyasciigraph()
    print('%s to %s' % (ns_to_asctime(begin_ns), ns_to_asctime(end_ns)))

    def collect(attr):
        # Top `limit` tids ranked by the given page counter.
        rows = []
        for n, tid in enumerate(
                sorted(self.state.tids.values(),
                       key=operator.attrgetter(attr), reverse=True), 1):
            rows.append(("%s (%d)" % (tid.comm, tid.tid), getattr(tid, attr)))
            if 0 < limit <= n:
                break
        return rows

    for line in graph.graph("Per-TID Memory Allocations",
                            collect('allocated_pages'), unit=" pages"):
        print(line)
    for line in graph.graph("Per-TID Memory Deallocation",
                            collect('freed_pages'), unit=" pages"):
        print(line)
    print("\nTotal memory usage:\n- %d pages allocated\n- %d pages freed" %
          (self.state.mm["allocated_pages"], self.state.mm["freed_pages"]))
def output(self, args, begin_ns, end_ns, final=0):
    """Chart per-TID and per-CPU usage percentages for the interval."""
    limit = args.top
    total_ns = end_ns - begin_ns
    graph = Pyasciigraph()
    print('%s to %s' % (ns_to_asctime(begin_ns), ns_to_asctime(end_ns)))
    rows = []
    shown = 0
    for tid in sorted(self.state.tids.values(),
                      key=operator.attrgetter('cpu_ns'), reverse=True):
        # Optional whitelist of process names.
        if len(args.proc_list) > 0 and tid.comm not in args.proc_list:
            continue
        pc = float("%0.02f" % ((tid.cpu_ns * 100) / total_ns))
        suffix = (", %d migrations" % (tid.migrate_count)
                  if tid.migrate_count > 0 else "")
        rows.append(("%s (%d)%s" % (tid.comm, tid.tid, suffix), pc))
        shown += 1
        if 0 < limit <= shown:
            break
    for line in graph.graph("Per-TID CPU Usage", rows, unit=" %"):
        print(line)
    rows = []
    total_cpu_pc = 0
    for cpu in sorted(self.state.cpus.values(),
                      key=operator.attrgetter('cpu_ns'), reverse=True):
        cpu_pc = float("%0.02f" % cpu.cpu_pc)
        total_cpu_pc += cpu_pc
        rows.append(("CPU %d" % cpu.cpu_id, cpu_pc))
    for line in graph.graph("Per-CPU Usage", rows, unit=" %"):
        print(line)
    print("\nTotal CPU Usage: %0.02f%%\n" %
          (total_cpu_pc / len(self.state.cpus.keys())))
def output(self, args, begin_ns, end_ns, final=0):
    """Chart per-TID and per-CPU usage percentages for the interval.

    Fix: removed the unused locals ``cpu_total_ns`` and ``nb_cpu`` that
    were assigned but never read.
    """
    count = 0
    limit = args.top
    total_ns = end_ns - begin_ns
    graph = Pyasciigraph()
    values = []
    print('%s to %s' % (ns_to_asctime(begin_ns), ns_to_asctime(end_ns)))
    for tid in sorted(self.tids.values(),
                      key=operator.attrgetter('cpu_ns'), reverse=True):
        # Honor the optional process-name filter.
        if len(args.proc_list) > 0 and tid.comm not in args.proc_list:
            continue
        pc = float("%0.02f" % ((tid.cpu_ns * 100) / total_ns))
        values.append(("%s (%d)" % (tid.comm, tid.tid), pc))
        count = count + 1
        if limit > 0 and count >= limit:
            break
    for line in graph.graph("Per-TID CPU Usage", values):
        print(line)
    values = []
    for cpu in sorted(self.cpus.values(),
                      key=operator.attrgetter('cpu_ns'), reverse=True):
        values.append(("CPU %d" % cpu.cpu_id, float("%0.02f" % cpu.cpu_pc)))
    for line in graph.graph("Per-CPU Usage", values):
        print(line)
def _print_results(self, begin_ns, end_ns, final=0):
    """Print per-TID allocation/free charts plus overall totals.

    Bug fix: ``freed`` was incremented both while building the top-N
    display list and again in the final accumulation loop, double-counting
    the displayed processes.  Totals are now accumulated exactly once, over
    all processes that pass the filter (independent of the display limit).
    """
    count = 0
    limit = self._arg_limit
    graph = Pyasciigraph()
    values = []
    self.state = self._automaton.state
    alloc = 0
    freed = 0
    print('Timerange: [%s, %s]' % (
        common.ns_to_hour_nsec(begin_ns, gmt=self._arg_gmt, multi_day=True),
        common.ns_to_hour_nsec(end_ns, gmt=self._arg_gmt, multi_day=True)))
    for tid in sorted(self.state.tids.values(),
                      key=operator.attrgetter('allocated_pages'),
                      reverse=True):
        if not self.filter_process(tid):
            continue
        values.append(("%s (%d)" % (tid.comm, tid.tid),
                       tid.allocated_pages))
        count = count + 1
        if limit > 0 and count >= limit:
            break
    for line in graph.graph("Per-TID Memory Allocations", values,
                            unit=" pages"):
        print(line)
    values = []
    count = 0
    for tid in sorted(self.state.tids.values(),
                      key=operator.attrgetter('freed_pages'), reverse=True):
        if not self.filter_process(tid):
            continue
        values.append(("%s (%d)" % (tid.comm, tid.tid), tid.freed_pages))
        count = count + 1
        if limit > 0 and count >= limit:
            break
    for line in graph.graph("Per-TID Memory Deallocation", values,
                            unit=" pages"):
        print(line)
    # Totals over *all* matching tids; sort order is irrelevant for sums.
    for tid in self.state.tids.values():
        if not self.filter_process(tid):
            continue
        alloc += tid.allocated_pages
        freed += tid.freed_pages
    print("\nTotal memory usage:\n- %d pages allocated\n- %d pages freed" %
          (alloc, freed))
def summary(request):
    """Render a plain-text benchmark summary across all configured caches."""
    P = 15

    def fmt_ms(s):
        return ('{:.3f}ms'.format(1000 * s)).rjust(P)

    r = http.HttpResponse()
    r.write(''.ljust(P))
    for col in ('TIMES', 'AVERAGE', 'MEDIAN', 'STDDEV'):
        r.write(col.rjust(P))
    r.write('\n')
    avgs = []
    medians = []
    for CACHE in settings.CACHE_NAMES:
        data = caches[CACHE].get('benchmarking')
        if data is None:
            r.write('Nothing for {}\n'.format(CACHE))
            continue
        # Always chop off the first 10 measurements because it's usually
        # way higher than all the others. That way we're only comparing
        # configurations once they're all warmed up.
        data = data[10:]
        median, avg, stddev = _stats(data)
        avgs.append((CACHE, avg * 1000))
        medians.append((CACHE, median * 1000))
        r.write('{}{}{}{}{}\n'.format(
            CACHE.ljust(P),
            str(len(data)).rjust(P),
            fmt_ms(avg),
            fmt_ms(median),
            fmt_ms(stddev),
        ))
    r.write('\n')
    graph = Pyasciigraph(float_format='{0:,.3f}')
    for line in graph.graph('Best Averages (shorter better)', avgs):
        print(line, file=r)
    for line in graph.graph('Best Medians (shorter better)', medians):
        print(line, file=r)
    print('\n', file=r)
    sizes = [(name, get_redis_connection(name).strlen(":1:benchmarking"))
             for name in settings.CACHE_NAMES]
    graph = Pyasciigraph(human_readable='si', )
    for line in graph.graph('Size of Data Saved (shorter better)', sizes):
        print(line, file=r)
    print('\n', file=r)
    return r
def calculate_text_coverage(self):
    """
    Prints CLI stats about percentage of matched dbpedia facts in wiki raw text.

    Fixes: under Python 2 the percentage used integer division (always 0 or
    100); ``iteritems`` was py2-only; relations with zero target resources
    raised ZeroDivisionError. Percentages now use float math, ``items()`` is
    used, and empty relations are skipped.
    """
    matched_count = self.count_matches()

    # Total number of target resources per relation across all entities.
    total_count = {}
    for entity, relation_types in self.dbpedia.items():
        for relation, values in relation_types.items():
            target_resources = values.get('resources', [])
            total_count.setdefault(relation, 0)
            total_count[relation] += len(target_resources)

    occurrence_count = {}
    for relation in total_count:
        occurrence_count[relation] = {
            'total': total_count[relation],
            # a fact may occur several times in an article, which would push
            # coverage above 100%, so cap matched at the total
            'matched': min(total_count[relation],
                           matched_count.setdefault(relation, 0))
        }

    # bar chart: percentage of matched facts per relation
    data = [
        ('% ' + str(vals['matched']) + '/' + str(vals['total']) + ' ' +
         rel.split('/')[-1],
         100.0 * vals['matched'] / vals['total'])
        for rel, vals in occurrence_count.items() if vals['total']
    ]
    graph = Pyasciigraph()
    for line in graph.graph('occurred facts in percentage', data):
        print(line)
def twitterstat(account, tweetid):
    """Scrape a tweet's text and its retweet/like counts, render a chart.

    Returns an ANSI-colored string with the tweet and a small bar chart;
    falls back to just the tweet text when the stats block is missing, or
    an error string when the tweet cannot be found.

    Fix: the bare ``except:`` clauses are narrowed to the exceptions the
    scraping can actually raise — AttributeError when a ``find()`` returns
    None, ValueError when a count fails to parse as int.
    """
    d = webdriver.Remote(
        command_executor="http://127.6.138.129:15002",
        desired_capabilities=webdriver.DesiredCapabilities.PHANTOMJS)
    url = "https://twitter.com/" + account + "/status/" + tweetid
    d.get(url)
    soup = BeautifulSoup(d.page_source)
    tweet_cls = 'TweetTextSize TweetTextSize--26px js-tweet-text tweet-text'
    try:
        tweet = soup.find('p', {'class': tweet_cls}).text
        stats = soup.find('ul', {'class': 'stats'})
        retweets = stats.find(
            'li', {'class': 'js-stat-count js-stat-retweets stat-count'}
        ).find('a').find('strong').text
        likes = stats.find(
            'li', {'class': 'js-stat-count js-stat-favorites stat-count'}
        ).find('a').find('strong').text
        # Counts may come formatted with thousands separators.
        if "," in retweets:
            retweets = retweets.replace(',', '')
        if "," in likes:
            likes = likes.replace(',', '')
        chart = [('retweets', int(retweets)), ('likes', int(likes))]
        gr = Pyasciigraph()
        show = gr.graph(tweet, chart)
        return "\033[1;31m%s\n\033[1;34m%s\n%s\033[m\n" % (
            show[0], show[2], show[3])
    except (AttributeError, ValueError):
        try:
            tweet = soup.find('p', {'class': tweet_cls}).text
            return "\033[1;31m%s\033[m\n" % tweet
        except AttributeError:
            return "\033[1;31mCould not find tweet\033[m\n"
def test_human_readable_si(self):
    # Verify that human_readable='si' renders increasing magnitudes with
    # the SI suffixes K, M, G, T, P, E, Z, Y.
    # NOTE(review): the expected strings' internal padding may have been
    # mangled by whitespace collapsing in this copy — verify against the
    # upstream test before trusting exact spacing.
    test = [('long_labe☭', 1234), ('sl', 1231234), ('line3', 1231231234),
            ('line4', 1231231231234), ('line5', 1231231231231234),
            ('line6', 1231231231231231234), ('line7', 1231231231231231231234),
            ('line8', 1231231231231231231231234),
            ('line9', 123231231231231231231231234)]
    graph = Pyasciigraph(human_readable='si')
    res = graph.graph('☭test print', test)
    expected = [
        '☭test print',
        '###############################################################################',
        ' 1K long_labe☭',
        ' 1M sl ',
        ' 1G line3 ',
        ' 1T line4 ',
        ' 1P line5 ',
        ' 1E line6 ',
        ' 1Z line7 ',
        ' 1Y line8 ',
        '█████████████████████████████████████████████████████████████ 123Y line9 ',
    ]
    gprint(res)
    gprint(expected)
    assert res == expected
def graph_thresholds(test_data):
    """Demo: color values by threshold buckets and draw a tweaked graph.

    Returns the configured graph object and the colored data.
    """
    # H color test
    # Multicolor on one line
    print('\nMultiColor example:')
    # Color lines according to Thresholds
    thresholds = {51: Gre, 100: Blu, 350: Yel, 500: Red}
    colored = hcolor(test_data, thresholds)
    # graph with colors, power of 1000, different graph symbol,
    # float formatting and a few tweaks
    graph = Pyasciigraph(
        line_length=120,
        min_graph_length=50,
        separator_length=4,
        multivalue=True,
        human_readable='si',
        graphsymbol='*',  # comment out if you want to use solid bars
        float_format='{0:,.2f}',
        force_max_value=2000,
    )
    for row in graph.graph(label='With Thresholds', data=colored):
        print(row)
    return graph, colored
def iotop_output_write(self):
    """Chart the heaviest writers with a file/net/unknown breakdown."""
    limit = self._arg_limit
    graph = Pyasciigraph()
    values = []
    row_fmt = "{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown "
    shown = 0
    for tid in sorted(self.state.tids.values(),
                      key=operator.attrgetter('write'), reverse=True):
        if not self.filter_process(tid):
            continue
        label = row_fmt.format(
            common.convert_size(tid.write, padding_after=True),
            "%s (%d)" % (tid.comm, tid.pid),
            common.convert_size(tid.disk_write, padding_after=True),
            common.convert_size(tid.net_write, padding_after=True),
            common.convert_size(tid.unk_write, padding_after=True))
        values.append((label, tid.write))
        shown += 1
        if 0 < limit <= shown:
            break
    for line in graph.graph('Per-process I/O Write', values,
                            with_value=False):
        print(line)
def print_charts(dataset, title, weekday=False):
    """ Prints nice charts based on a dict {(key, value), ...} """
    all_values = list(dataset.values())
    mean = numpy.mean(all_values)
    median = numpy.median(all_values)
    chart = []
    for key in sorted(dataset.keys()):
        label = int_to_weekday(key) if weekday else key
        # Flag keys well above (+) or well below (-) the median.
        if dataset[key] >= median * 1.33:
            label = "%s (\033[92m+\033[0m)" % label
        elif dataset[key] <= median * 0.66:
            label = "%s (\033[91m-\033[0m)" % label
        chart.append((label, dataset[key]))
    thresholds = {
        int(mean): Gre,
        int(mean * 2): Yel,
        int(mean * 3): Red,
    }
    data = hcolor(chart, thresholds)
    graph = Pyasciigraph(
        separator_length=4,
        multivalue=False,
        human_readable='si',
    )
    for line in graph.graph(title, data):
        print(line)
    print("")
def plot(self):
    """Draw this object's data as an ascii graph, framed by blank lines."""
    chart = Pyasciigraph()
    print()
    for row in chart.graph(self.name, list(self.data.items())):
        print(row)
    print()
    print("-" * 80)
def test_neg_multicolor(self):
    # Mixed positive/negative, single- and multi-valued, colored and
    # uncolored inputs must render with correct bar segments and ANSI codes.
    # NOTE(review): exact padding inside the expected strings may have been
    # altered by whitespace collapsing in this copy — verify upstream.
    test = [('testval0', 600),
            ('testval1', 400, Red),
            ('testval2', [(600, Gre), (500, Blu)]),
            ('testval3', [(200, Yel), (100, )]),
            ('testval4', -170, Cya),
            ('testval5', 50, Blu),
            ('testval6', [(-300, Gre), (-230, Red)]),
            ('testval7', [(-100, Gre), (-230, Red), (200, Yel), (600, Blu)])]
    graph = Pyasciigraph()
    res = graph.graph('☭test print', test)
    expected = [
        u'\u262dtest print',
        u'###############################################################################',
        u' \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 600 testval0',
        u' \x1b[0;31m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m \x1b[0;31m400\x1b[0m testval1',
        u' \x1b[0;34m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;32m\u2588\u2588\u2588\u2588\u2588\x1b[0m \x1b[0;32m600\x1b[0m,\x1b[0;34m500\x1b[0m testval2',
        u' \u2588\u2588\u2588\u2588\u2588\x1b[0;33m\u2588\u2588\u2588\u2588\u2588\x1b[0m \x1b[0;33m200\x1b[0m,100 testval3',
        u' \x1b[0;36m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m \x1b[0;36m-170\x1b[0m testval4',
        u' \x1b[0;34m\u2588\u2588\x1b[0m \x1b[0;34m50\x1b[0m testval5',
        u' \x1b[0;32m\u2588\u2588\u2588\x1b[0m\x1b[0;31m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m \x1b[0;32m-300\x1b[0m,\x1b[0;31m-230\x1b[0m testval6',
        u' \x1b[0;31m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;32m\u2588\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;33m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;34m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m \x1b[0;32m-100\x1b[0m,\x1b[0;31m-230\x1b[0m,\x1b[0;33m200\x1b[0m,\x1b[0;34m600\x1b[0m testval7'
    ]
    gprint(res)
    gprint(expected)
    assert res == expected
def iotop_output_print_file_write(self, files):
    """Chart the most written-to files, up to the configured limit."""
    # Compute files read
    limit = self._arg_limit
    graph = Pyasciigraph()
    values = []
    shown = 0
    # Largest writers first; skip files that were never written.
    for name, info in sorted(files.items(),
                             key=lambda item: item[1]['write'],
                             reverse=True):
        if info["write"] == 0:
            continue
        size_col = "{:>10}".format(
            common.convert_size(info["write"], padding_after=True))
        values.append(("%s %s %s" % (size_col, info["name"],
                                     str(info["other"])[1:-1]),
                       info["write"]))
        shown += 1
        if 0 < limit <= shown:
            break
    for line in graph.graph('Files Write', values, sort=2,
                            with_value=False):
        print(line)
def print_graph(dataset, title):
    """Chart dataset values by sorted key, colored by mean-based
    thresholds unless --no-color was given (reads module-level `args`)."""
    graph = Pyasciigraph(
        separator_length=4,
        multivalue=False,
        human_readable='si',
    )
    chart = [(key, dataset[key]) for key in sorted(dataset.keys())]
    mean = np.mean(list(dataset.values()))
    if args.no_color:
        data = chart
    else:
        thresholds = {
            int(mean): Gre,
            int(mean * 2): Yel,
            int(mean * 3): Red,
        }
        data = hcolor(chart, thresholds)
    for line in graph.graph(title, data):
        print(line)
def graph_intervals(tl_videos, interval=timedelta(hours=1)):
    """ Plot ascii frequency of photos per bin """
    bins = {}
    step_s = int(interval.total_seconds())
    for video in tl_videos:
        # Align bin edges to the interval grid around this video.
        start = prev_mark(interval, video.start)
        end = next_mark(interval, video.end)
        marks = list(rrule(SECONDLY, dtstart=start, until=end,
                           interval=step_s))
        for bin_start in marks:
            bins[bin_start] = sum(
                1 for im in video.images
                if bin_start <= im.taken < bin_start + interval)
    graphable = [(h.isoformat(), bins[h]) for h in sorted(bins)]
    graph = Pyasciigraph()
    for line in graph.graph('Frequency per {}'.format(interval), graphable):
        print(line)
def plot_testsuite_time(json_data, top_k=TOP_K, ascii_graph=False,
                        report_file=None):
    """Report the top-k test suites by accumulated execution time.

    Prints the ranking (as an ascii graph when requested) and optionally
    writes the same report to `report_file`.

    Fixes: py2-only ``iteritems()`` and print statements replaced with
    forms that run on both Python 2 and 3; ``!= None`` → ``is not None``;
    the duplicated header string is built once.
    """
    suite_time = {}
    overall_time = 0.0
    for suite in json_data:
        # Suite names are dotted; keep only the last component.
        name = suite['testsuite']['@name'].rsplit(".", 1)[-1]
        time = float(suite['testsuite']['@time'].replace(',', ''))
        overall_time += time
        suite_time[name] = suite_time.get(name, 0.0) + time
    d_descending = OrderedDict(sorted(suite_time.items(),
                                      key=lambda kv: kv[1], reverse=True))
    gdata = [(k, v) for k, v in take(d_descending.items(), top_k)]
    header = ('Top ' + str(top_k) +
              ' testsuite in terms of execution time (in seconds).. '
              '[Total time: ' + str(overall_time) + ' seconds]')
    print('\n' + header)
    if ascii_graph:
        graph = Pyasciigraph()
        for line in graph.graph('', gdata):
            print(line)
    else:
        for line in gdata:
            print(line[0] + "\t" + str(line[1]))
    if report_file is not None:
        with open(report_file, "w") as f:
            f.write(header + '\n')
            for line in gdata:
                f.write(line[0] + "\t" + str(line[1]) + "\n")
def irq_list_to_freq(self, irq, _min, _max, res, name, nr):
    """Print a frequency distribution of IRQ handler durations (usec).

    The [_min, _max] range is split into `res` buckets; each handler
    duration is clamped into its bucket and counted.

    Fix: removed the `buckets` list that was built but never read.
    """
    step = (_max - _min) / res
    if step == 0:
        return
    counts = [0] * res
    for entry in irq["list"]:
        duration_us = (entry.stop_ts - entry.start_ts) / 1000
        # Clamp overflows into the last bucket.
        bucket = min(int((duration_us - _min) / step), res - 1)
        counts[bucket] += 1
    g = [("%0.03f" % (idx * step + _min), c)
         for idx, c in enumerate(counts)]
    graph = Pyasciigraph()
    for line in graph.graph('Handler duration frequency distribution %s '
                            '(%s) (usec)' % (name, nr), g,
                            info_before=True, count=True):
        print(line)
    print("")
def print_charts(dataset, title, args, weekday=False):
    """Chart dataset values, marking median outliers, honoring --no-color.

    Fix: the ANSI-stripping regex is now compiled once before the output
    loop instead of on every printed line.
    """
    chart = []
    all_values = list(dataset.values())
    mean = numpy.mean(all_values)
    median = numpy.median(all_values)
    for key in sorted(dataset.keys()):
        label = int_to_weekday(key) if weekday else key
        # Mark keys well above (+) or below (-) the median.
        if dataset[key] >= median * 1.33:
            label = "%s (\033[92m+\033[0m)" % label
        elif dataset[key] <= median * 0.66:
            label = "%s (\033[91m-\033[0m)" % label
        chart.append((label, dataset[key]))
    thresholds = {
        int(mean): Gre,
        int(mean * 2): Yel,
        int(mean * 3): Red,
    }
    data = hcolor(chart, thresholds)
    graph = Pyasciigraph(
        separator_length=4,
        multivalue=False,
        human_readable='si',
    )
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    for line in graph.graph(title, data):
        if args.no_color:
            line = ansi_escape.sub('', line)
        print(line)
    print("")
def show_index(self):
    """Write an ascii chart of each indexed coin's desired percentage."""
    rows = [(coin.Ticker, coin.DesiredPercentage)
            for coin in IndexedCoinModel.select()]
    pyGraph = Pyasciigraph(
        line_length=50,
        min_graph_length=50,
        separator_length=4,
        multivalue=False,
        human_readable='si',
        graphsymbol='*',
        float_format='{0:,.2f}')
    # Threshold-based coloring of the bars.
    colored = hcolor(rows, {15: Gre, 30: Blu, 50: Yel, 60: Red})
    sys.stdout.write("\n")
    for line in pyGraph.graph('Index Distribution', data=colored):
        sys.stdout.write(line + "\n")
    sys.stdout.write("\n")
def output_file_read(self, args):
    """Chart per-file read totals aggregated across all tids.

    Fixes: idiomatic membership tests (`x not in d` instead of
    `not x in d.keys()`), tuple form of startswith, and "other" is now set
    for every newly-seen file — previously pipe/socket/anon_inode entries
    could miss it and raise KeyError when building the chart labels.
    """
    count = 0
    limit = args.top
    graph = Pyasciigraph()
    values = []
    files = {}
    for tid in self.tids.values():
        for fd in tid.fds.values():
            if fd.filename not in files:
                entry = files[fd.filename] = {}
                entry["read"] = fd.read
                entry["write"] = fd.write
                # Pipes/sockets/anon inodes are per-process, so tag them
                # with the owning command for a meaningful label.
                if fd.filename.startswith(("pipe", "socket", "anon_inode")):
                    entry["name"] = "%s (%s)" % (fd.filename, tid.comm)
                else:
                    entry["name"] = fd.filename
                entry["other"] = "(%d %d)" % (fd.fd, tid.tid)
            else:
                files[fd.filename]["read"] += fd.read
                files[fd.filename]["write"] += fd.write
    for f in files.values():
        if f["read"] == 0:
            continue
        values.append(("%s %s %s" % (f["name"], convert_size(f["read"]),
                                     f["other"]), f["read"]))
        count = count + 1
        if limit > 0 and count >= limit:
            break
    for line in graph.graph('Files Read', values, sort=2):
        print(line)
def synthesize_trials(block: Block) -> List[dict]:
    """Generate trial assignments for `block` via the sweetpea server.

    Starts the dockerized server, POSTs the synthesized request, decodes
    the returned solutions, and prints a frequency histogram so the
    sampling distribution can be eyeballed for evenness.
    """
    # TODO: Do this in separate thread, and output some kind of progress indicator.
    json_data = __generate_json_request(block)
    solutions = cast(List[dict], [])
    # Make sure the local image is up-to-date.
    update_docker_image("sweetpea/server")
    # 1. Start a container for the sweetpea server, making sure to use -d and -p to map the port.
    container = start_docker_container("sweetpea/server", 8080)
    # 2. POST to /experiments/generate using the backend request json as the body.
    # TODO: Do this in separate thread, and output some kind of progress indicator.
    print("Sending formula to backend... ", end='', flush=True)
    t_start = datetime.now()
    try:
        __check_server_health()
        experiments_request = requests.post(
            'http://localhost:8080/experiments/generate', data=json_data)
        if experiments_request.status_code != 200 or not experiments_request.json()['ok']:
            tmp_filename = ""
            # Keep the failing request body around for post-mortem debugging.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                f.write(str.encode(json_data))
                tmp_filename = f.name
            raise RuntimeError(
                "Received non-200 response from experiment generation! LowLevelRequest body saved to temp file '" +
                tmp_filename + "' status_code=" +
                str(experiments_request.status_code) + " response_body=" +
                str(experiments_request.text))
        solutions = experiments_request.json()['solutions']
        t_end = datetime.now()
        print(str((t_end - t_start).seconds) + "s")
    # 3. Stop and then remove the docker container.
    finally:
        stop_docker_container(container)
    # 4. Decode the results
    result = list(map(lambda s: __decode(block, s['assignment']), solutions))
    # Dump histogram of frequency distribution, just to make sure it's somewhat even.
    print()
    print("Found " + str(len(solutions)) + " distinct solutions.")
    print()
    hist_data = [("Solution #" + str(idx + 1), sol['frequency'])
                 for idx, sol in enumerate(solutions)]
    hist_data.sort(key=lambda tup: tup[1], reverse=True)
    graph = Pyasciigraph()
    for line in graph.graph('Most Frequently Sampled Solutions',
                            hist_data[:15]):
        print(line)
    return result
def makeGraph(fList):
    """Classify files by extension and chart the per-category counts.

    Fix: extension matching is now case-insensitive for *every* category —
    the original lowered the name for some checks (.pdf, .mp4, .jpeg, .mp3,
    .zip, ...) but compared the raw name for others (.mkv, .png, .ogg, ...),
    so uppercase variants were counted inconsistently.
    Reads the module-level `oss` flag to pick the heading.
    """
    # Category -> extensions table; a file may match several categories,
    # mirroring the original independent `if` checks.
    categories = [
        ('docs', ('.pdf', '.docx', '.doc', '.txt')),
        ('songs', ('.mp3', '.ogg', '.wav')),
        ('videos', ('.mp4', '.mkv', '.avi')),
        ('images', ('.jpeg', '.png', '.jpg', '.gif')),
        ('codes', ('.c', '.py', '.java', '.cpp')),
        ('compressed', ('.zip', '.7z', '.deb', '.tar.gz', '.rpm')),
        ('others', ('.apk', '.jar', '.exe', '.iso', '.dmg', '.csv',
                    '.log', '.db')),
    ]
    counts = {name: 0 for name, _ in categories}
    ll = len(fList)
    if oss == 1:
        print("Total no of files in system: ", ll)
    else:
        print("Total no of files in user home: ", ll)
    for file in fList:
        lowered = file.name.lower()
        for cat_name, exts in categories:
            if lowered.endswith(exts):
                counts[cat_name] += 1
    data = [('docs', counts['docs']), ('songs', counts['songs']),
            ('videos', counts['videos']), ('images', counts['images']),
            ('codes', counts['codes']), ("compressed", counts['compressed']),
            ('others', counts['others'])]
    data = vcolor(data, [Gre, Yel, Red])
    graph = Pyasciigraph()
    for line in graph.graph('Files on PC', data):
        print(line)
def print_distribution_graph(data, title):
    """Print an ascii histogram of the integer values in `data`.

    Only values that actually occur are charted.  Fix: the py2-only print
    statements are replaced with call forms that behave identically on
    Python 2 and 3.
    """
    counts = np.bincount(data)
    nonzero = np.nonzero(counts)[0]
    dist = [(str(v), c) for v, c in zip(nonzero, counts[nonzero])]
    graph = Pyasciigraph(human_readable='si')
    for line in graph.graph(title, dist):
        print(line)
    print("")
def histogram(data, name):
    """Print weekly bucket counts of response times for `name`.

    Each item in `data` is bucketed by whole weeks (86400 * 7 seconds).
    Fix: py2-only ``iteritems()`` and print statements replaced with forms
    that also run on Python 3.
    """
    title = 'count of %s response time by week' % name
    g = Pyasciigraph()
    buckets = defaultdict(int)
    seconds_per_week = 86400 * 7
    for item in data:
        buckets[int(item // seconds_per_week)] += 1
    for line in g.graph(title, ((k, v) for k, v in buckets.items())):
        print(line)
def dumptAll(self, context=""):
    """Append all recorded perfmon timings and the event timeline to the
    debug files configured in myConfig.

    For each instrumented part this writes an ascii distribution graph of
    durations and cycle times plus the computed averages.

    Fixes: the explicit ``myfile.close()`` calls inside the ``with`` blocks
    were redundant and removed; the "Averazge" output typo is corrected.
    """
    global distriDuration
    self.logger.info("Dump all perfmon recorded timings")
    with open(myConfig['DEBUG']['PARTS']['PERFMON']['FILE'], "a+") as myfile:
        for part in distriDuration:
            sorted_distriDuration = self.getSortedDuration(part)
            graph = Pyasciigraph(graphsymbol='#')
            myfile.write("----------------------- {}\n".format(context))
            myfile.write("Duration Timing for parts : {}\n".format(part))
            for line in graph.graph(part, sorted_distriDuration):
                myfile.write("{}\n".format(
                    line.encode('ascii', 'ignore').decode('utf-8')))
            # Weighted average: each measure is (duration, occurrence count).
            totalDur = 0
            totalNb = 0
            for measure in distriDuration[part].items():
                totalDur = totalDur + (measure[0] * measure[1])
                totalNb = totalNb + measure[1]
            myfile.write("Average {} ms/processing\n".format(
                totalDur / totalNb))
        for part in distriCycle:
            sorted_distriCycle = self.getSortedCycle(part)
            graph = Pyasciigraph(graphsymbol='#')
            myfile.write("----------------------- {}\n".format(context))
            myfile.write("Cycle Timing for parts : {}\n".format(part))
            for line in graph.graph(part, sorted_distriCycle):
                myfile.write("{}\n".format(
                    line.encode('ascii', 'ignore').decode('utf-8')))
            totalDur = 0
            totalNb = 0
            for measure in distriCycle[part].items():
                totalDur = totalDur + (measure[0] * measure[1])
                totalNb = totalNb + measure[1]
            # Convert the average duration into events per second.
            myfile.write("Average {} events/s\n".format(
                1000 / (totalDur / totalNb)))
    with open(myConfig['DEBUG']['PARTS']['TRACER']['FILE'], "a+") as myfile:
        myfile.write("----------------------- {}\n".format(context))
        for evt in timeline:
            myfile.write("{0} {1} {2} {3}\n".format(
                evt['ts'], evt['thread'], evt['tag'], evt['state']))
def print_distribution_graph(data, title):
    """Chart how often each integer value occurs in `data`.

    Zero-count values are omitted.  Fix: py2 print statements replaced
    with call syntax that works on both Python 2 and 3.
    """
    freq = np.bincount(data)
    present = np.nonzero(freq)[0]
    dist = [(str(v), n) for v, n in zip(present, freq[present])]
    graph = Pyasciigraph(human_readable='si')
    for line in graph.graph(title, dist):
        print(line)
    print("")
def output_latencies(self, args):
    """Print a latency histogram (ms) for every recorded process."""
    graph = Pyasciigraph()
    for proc, hist in self.latency_hist.items():
        values = [("%s" % (bucket), freq) for bucket, freq in hist]
        for line in graph.graph('%s requests latency (ms)' % proc, values,
                                unit=" ms"):
            print(line)
def validate(pred, min_period, max_period):
    """Back-test the module-level predictor `p` over [min_period, max_period].

    For each target period the predictor is trained on all earlier periods;
    hits against the drawn numbers are tallied, rewards looked up in
    reward_table, and an hourly distribution graph plus summary stats are
    printed.  Reads module globals: p, all_data, reward_table.

    Fixes: ``xrange`` → ``range`` and print statements → call forms, so the
    code also runs under Python 3 (single-argument ``print(x)`` is
    behavior-identical on Python 2).
    """
    graph = Pyasciigraph()
    num_count = 0
    hit_count = 0
    spend_count = 0
    earned_count = 0
    earned_cash = 0
    earned_set = list()
    graph_data = defaultdict(int)
    for target_period in range(min_period, max_period + 1):
        dataset_begin = min_period
        dataset_end = target_period - 1
        if dataset_begin > dataset_end:
            continue
        result = p.predict(target_period=target_period,
                           dataset_begin=dataset_begin,
                           dataset_end=dataset_end)
        actual_numbers = set(all_data[target_period]['numbers'])
        predicted_numbers = set(result['numbers'])
        hit_numbers = actual_numbers & predicted_numbers
        m_count = len(predicted_numbers)
        h_count = len(hit_numbers)
        num_count += m_count
        hit_count += h_count
        # Only bets of 1..10 numbers cost money; rewards require a
        # matching (hits, picks) entry in the table.
        if m_count > 0 and m_count <= 10:
            spend_count += 1
            if (h_count, m_count) in reward_table:
                earned_count += 1
                earned_cash += reward_table[(h_count, m_count)]
                earned_set.append((h_count, m_count))
        hour = '%s:00' % all_data[target_period]['time'].split(':')[0]
        graph_data[hour] += 1
    print('predictor: %s' % p.get_name())
    for line in graph.graph('', graph_data.items(), sort=2):
        print(line.encode('UTF-8'))
    if num_count == 0:
        print('hit rate: N/A')
    else:
        print('hit rate: %.6f (%d / %d)' %
              ((float(hit_count) / num_count), hit_count, num_count))
    if spend_count == 0:
        print('C/P: N/A')
    else:
        print('C/P: %.6f (%d / %d)' %
              ((float(earned_count) / spend_count), earned_count,
               spend_count))
    print('earned set: %s' % earned_set)
    print('spend: %d NTD' % (spend_count * 25))
    print('earned cash: %d NTD' % earned_cash)
    print("")
def iotop_output_nr_sector(self):
    """Chart the number of sectors transferred per disk."""
    graph = Pyasciigraph()
    ordered = sorted(self.state.disks.values(),
                     key=operator.attrgetter('nr_sector'), reverse=True)
    # Skip disks with no activity.
    values = [(d.prettyname, d.nr_sector) for d in ordered
              if d.nr_sector != 0]
    for line in graph.graph('Disk nr_sector', values, unit=" sectors"):
        print(line)
def print_stats_diagram(self, total_entries):
    """Print per-resolver blocking counts followed by a TOTAL row."""
    graph = Pyasciigraph(separator_length=4)
    data = [(name, self.statistics[name])
            for name in sorted(self.resolver_names)]
    data.append(('TOTAL', total_entries))
    for line in graph.graph('Blocking Statistics:', data):
        print(line)
def test_type_output(self):
    """Every line produced by graph() must be a (unicode) string."""
    sample = [('long_labe☭', 423), ('sl', 1234), ('line3', 531),
              ('line4', 200), ('line5', 834)]
    graph = Pyasciigraph()
    lines = graph.graph('test print', sample)
    # py2 returns unicode objects, py3 plain str.
    expected = unicode if sys.version < '3' else str
    for line in lines:
        assert type(line) == expected
def print_graph(header, data, is_bytes=False):
    """Render `data` under `header`, sized to the current terminal width.

    When is_bytes is true, values are humanized with the 'cs' scheme.
    """
    width = shutil.get_terminal_size((DEFAULT_TERMINAL_WIDTH, 20)).columns
    graph = Pyasciigraph(float_format='{:,.1f}',
                         min_graph_length=20,
                         separator_length=1,
                         line_length=width,
                         human_readable='cs' if is_bytes else None)
    for line in graph.graph(header, data):
        print(line)
    print()
def _print_results(self, begin_ns, end_ns, final=0):
    """Print per-TID and per-CPU usage charts for the time range."""
    # print('event count: {}'.format(self._analysis.event_count))
    limit = self._arg_limit
    total_ns = end_ns - begin_ns
    graph = Pyasciigraph()
    print('Timerange: [%s, %s]' % (
        common.ns_to_hour_nsec(begin_ns, gmt=self._arg_gmt, multi_day=True),
        common.ns_to_hour_nsec(end_ns, gmt=self._arg_gmt, multi_day=True)))
    rows = []
    shown = 0
    for tid in sorted(self.state.tids.values(),
                      key=operator.attrgetter('cpu_ns'), reverse=True):
        if self._arg_proc_list and tid.comm not in self._arg_proc_list:
            continue
        # tid 0 is excluded from the chart.
        if tid.tid == 0:
            continue
        pc = float("%0.02f" % ((tid.cpu_ns * 100) / total_ns))
        suffix = (", %d migrations" % (tid.migrate_count)
                  if tid.migrate_count > 0 else "")
        rows.append(("%s (%d)%s" % (tid.comm, tid.tid, suffix), pc))
        shown += 1
        if 0 < limit <= shown:
            break
    for line in graph.graph("Per-TID CPU Usage", rows, unit=" %"):
        print(line)
    rows = []
    total_cpu_pc = 0
    for cpu in sorted(self.state.cpus.values(),
                      key=operator.attrgetter('cpu_ns'), reverse=True):
        cpu_pc = float("%0.02f" % cpu.cpu_pc)
        total_cpu_pc += cpu_pc
        rows.append(("CPU %d" % cpu.cpu_id, cpu_pc))
    for line in graph.graph("Per-CPU Usage", rows, unit=" %"):
        print(line)
    print("\nTotal CPU Usage: %0.02f%%\n" %
          (total_cpu_pc / len(self.state.cpus.keys())))
def iotop_output_nr_sector(self, args):
    """Chart sectors transferred per disk, busiest first."""
    graph = Pyasciigraph()
    busy_disks = [d for d in sorted(self.state.disks.values(),
                                    key=operator.attrgetter('nr_sector'),
                                    reverse=True)
                  if d.nr_sector != 0]
    values = [(d.prettyname, d.nr_sector) for d in busy_disks]
    for line in graph.graph('Disk nr_sector', values, unit=" sectors"):
        print(line)
def output_net_sent_bytes(self, args):
    """Graph bytes sent per network interface, heaviest sender first.

    Each label embeds a human-readable size via convert_size(); the raw
    byte count drives the bar length.
    (Fix: removed an unused local counter that was never incremented.)
    """
    graph = Pyasciigraph()
    values = []
    for iface in sorted(self.ifaces.values(),
                        key=operator.attrgetter('send_bytes'),
                        reverse=True):
        values.append(("%s %s" % (convert_size(iface.send_bytes),
                                  iface.name),
                       iface.send_bytes))
    for line in graph.graph('Network sent_bytes', values):
        print(line)
def hist(data, label):
    """Print an 8-bin ascii histogram of *data* after outlier rejection.

    data: numeric sequence; label: graph title.
    (Fix: dropped a commented-out print and the two locals it alone
    referenced -- dead code.)
    """
    data, outliers = reject_outliers(data)
    count, division = np.histogram(data, bins=8)
    # Pyasciigraph wants (str, int) pairs: label each bin by its left edge.
    hist_data = [(str(d), c) for d, c in zip(division, count)]
    graph = Pyasciigraph()
    for line in graph.graph(label, hist_data):
        print(line)
    print()
def output_nr_requests(self, args):
    """Graph the request count of each disk, busiest disk first.

    Bug fix: the skip test previously read disk.nr_sector (copy-pasted
    from the sector report), so a disk with requests but no sectors was
    hidden while a zero-request disk could still appear. It now skips
    on nr_requests, matching what this report graphs.
    (Also removed an unused local counter.)
    """
    graph = Pyasciigraph()
    values = []
    for disk in sorted(self.disks.values(),
                       key=operator.attrgetter('nr_requests'),
                       reverse=True):
        if disk.nr_requests == 0:
            continue
        values.append((disk.prettyname, disk.nr_requests))
    for line in graph.graph('Disk nr_requests', values):
        print(line)
def print_histogram(data, label=''): # Work around a bug in ascii_graph. # https://github.com/kakwa/py-ascii-graph/issues/3 histogram = [(str(key), value) \ for (key, value) \ in data] graph = Pyasciigraph() for line in graph.graph(label, histogram): # Encode explicitly to get around this bug: # https://github.com/kakwa/py-ascii-graph/issues/4 print line.encode('utf-8')
def graph_output(self, args, begin_ns, end_ns, final=0):
    """Plot CPU usage over time: one graph per requested process, then
    a graph of total CPU usage."""
    for comm in args.proc_list:
        chart = Pyasciigraph()
        samples = []
        for sec in sorted(self.history.keys()):
            # Stop at the first second with no sample for this process.
            if comm not in self.history[sec]["proc"].keys():
                break
            usage_ns = self.history[sec]["proc"][comm] * 100
            pct = float("%0.02f" % (usage_ns /
                                    self.history[sec]["total_ns"]))
            samples.append(("%s" % sec_to_hour(sec), pct))
        for row in chart.graph("%s CPU Usage" % comm, samples, unit=" %"):
            print(row)
    chart = Pyasciigraph()
    samples = []
    for sec in sorted(self.history.keys()):
        samples.append(("%s" % sec_to_hour(sec),
                        float("%0.02f" % (self.history[sec]["cpu"]))))
    for row in chart.graph("Total CPU Usage", samples, unit=" %"):
        print(row)
def getHoneypots(): honeypotList = executeQuery("db.session.aggregate({\$group:{_id:'\$honeypot','count':{\$sum:1}}},{\$sort:{count:-1}}).forEach(function(x){printjson(x)})").split('\n') for pair in honeypotList: match = re.search(r'"_id" : "(.*)", "count" : (\d+) }',pair) if match: attacksByHoneypot[match.group(1)] = int(match.group(2)) print figlet_format('Honeypots', font='small') graph = Pyasciigraph() for line in graph.graph('', sorted(attacksByHoneypot.items(), key=operator.itemgetter(1), reverse=True)): print(line) print
def graph_output(self, args, begin_ns, end_ns, final=0):
    """Plot CPU usage over time: one graph per requested process, then
    a graph of total CPU usage.

    (Fix: `if not comm in ...` replaced with the idiomatic
    `comm not in ...` -- PEP 8 / E713; behavior unchanged.)
    """
    for comm in args.proc_list:
        graph = Pyasciigraph()
        values = []
        for sec in sorted(self.history.keys()):
            # Stop at the first second with no sample for this process.
            if comm not in self.history[sec]["proc"].keys():
                break
            pc = float("%0.02f" % (
                (self.history[sec]["proc"][comm] * 100) /
                self.history[sec]["total_ns"]))
            values.append(("%s" % sec_to_hour(sec), pc))
        for line in graph.graph("%s CPU Usage" % comm, values):
            print(line)
    graph = Pyasciigraph()
    values = []
    for sec in sorted(self.history.keys()):
        pc = float("%0.02f" % (self.history[sec]["cpu"]))
        values.append(("%s" % sec_to_hour(sec), pc))
    for line in graph.graph("Total CPU Usage", values):
        print(line)
def test_mulivalue_color_graphs(self):
    """Multi-value (segmented) bars must be coloured per segment.

    Inputs mix plain values, single-colour values, and lists of
    (position, colour) segments; expected lines embed raw ANSI codes.
    """
    test = [('testval0', 600),
            ('testval1', 400, Red),
            ('testval2', [(300, Gre), (500, Blu)]),
            ('testval3', [(200, Yel), (100,)]),
            ('testval4', 100, Cya),
            ('testval5', 50, Blu),
            ('testval6', [(100, Gre), (150, Red), (200, Yel), (600, Blu)])
            ]
    graph = Pyasciigraph(separator_length=4)
    res = graph.graph('test graph', test)
    expected = [
        'test graph',
        '#################################################################################',
        '\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 600 testval0',
        '\x1b[0;31m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m \x1b[0;31m400\x1b[0m testval1',
        '\x1b[0;32m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;34m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m \x1b[0;32m300\x1b[0m,\x1b[0;34m500\x1b[0m testval2',
        '\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0;33m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m \x1b[0;33m200\x1b[0m,100 testval3',
        '\x1b[0;36m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m \x1b[0;36m100\x1b[0m testval4',
        '\x1b[0;34m\u2588\u2588\u2588\u2588 \x1b[0m \x1b[0;34m50\x1b[0m testval5',
        '\x1b[0;32m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;31m\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;33m\u2588\u2588\u2588\u2588\x1b[0m\x1b[0;34m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m\x1b[0;32m100\x1b[0m,\x1b[0;31m150\x1b[0m,\x1b[0;33m200\x1b[0m,\x1b[0;34m600\x1b[0m testval6']
    assert res == expected
def output_dev_latency(self, args):
    """Graph the mean request completion time (ms) per disk.

    Disks with no completed requests are skipped; the `sort=2` option
    lets Pyasciigraph order the rows.
    (Fix: removed an unused local counter that was never incremented.)
    """
    graph = Pyasciigraph()
    values = []
    for disk in self.disks.values():
        if disk.completed_requests == 0:
            continue
        # Mean latency per completed request, converted ns -> ms.
        total = (disk.request_time / disk.completed_requests) / MSEC_PER_NSEC
        total = float("%0.03f" % total)
        values.append(("ms %s" % disk.prettyname, total))
    for line in graph.graph('Disk request time/sector', values, sort=2):
        print(line)
def iotop_output_net_recv_bytes(self, args):
    """Graph received bytes per interface, heaviest receiver first.

    Labels embed a human-readable size; bars are drawn without the
    numeric value column (with_value=False).
    """
    chart = Pyasciigraph()
    rows = []
    ordered = sorted(self.state.ifaces.values(),
                     key=operator.attrgetter('recv_bytes'),
                     reverse=True)
    for iface in ordered:
        label = "%s %s" % (convert_size(iface.recv_bytes), iface.name)
        rows.append((label, iface.recv_bytes))
    for out_line in chart.graph('Network recv_bytes', rows,
                                with_value=False):
        print(out_line)
def test_color_graphs(self):
    """Single-colour bars must wrap both bar and value in the colour's
    ANSI escape, resetting with \\x1b[0m afterwards."""
    test = [('testval0', 142),
            ('testval1', 204, BPur),
            ('testval2', 501, URed),
            ('testval3', 103, IRed),
            ('testval4', 29, BIGre),
            ('testval5', 19, UYel),
            ('testval6', 99, ICya),
            ('testval7', 404, BBlu)]
    graph = Pyasciigraph()
    res = graph.graph('test graph', test)
    expected = [
        'test graph',
        '###############################################################################',
        '\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 142 testval0',
        '\x1b[1;35m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m\x1b[1;35m204\x1b[0m testval1',
        '\x1b[4;31m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m\x1b[4;31m501\x1b[0m testval2',
        '\x1b[0;91m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m\x1b[0;91m103\x1b[0m testval3',
        '\x1b[1;92m\u2588\u2588\u2588 \x1b[0m \x1b[1;92m29\x1b[0m testval4',
        '\x1b[4;33m\u2588\u2588 \x1b[0m \x1b[4;33m19\x1b[0m testval5',
        '\x1b[0;96m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m \x1b[0;96m99\x1b[0m testval6',
        '\x1b[1;34m\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \x1b[0m\x1b[1;34m404\x1b[0m testval7']
    assert res == expected
def print_histogram(data, label=''):
    """Print (name, value) pairs as an ascii histogram, prefixing each
    label with its share of the total.

    NOTE(review): relies on a module-level ``stats['count']`` grand
    total -- confirm it is populated before this is called. The input
    list *data* is rewritten in place with the percentage-prefixed
    labels.
    """
    # Fill in percentages of the total.
    for key, datapoint in enumerate(data):
        name, value = datapoint
        # Guard against a zero total to avoid division by zero.
        percentage = (float(value) / stats['count']) * 100 if stats['count'] else 0
        name = '(%6.2f%%) %s' % (percentage, name)
        data[key] = (name, value)  # replace the entry at the same index
    graph = Pyasciigraph()
    for line in graph.graph(label, data):
        # Encode explicitly to get around this bug:
        # https://github.com/kakwa/py-ascii-graph/issues/4
        print line.encode('utf-8')
def test_no_label(self):
    """Calling graph() without a label must emit bars only -- no title
    or separator line -- and stringify non-string keys."""
    test = [(1, 423), (2, 1234), (3, 531), ('line4', 200), ('line5', 834)]
    graph = Pyasciigraph()
    res = graph.graph(data=test)
    expected = [
        '██████████████████████ 423 1 ',
        '██████████████████████████████████████████████████████████████████ 1234 2 ',
        '████████████████████████████ 531 3 ',
        '██████████ 200 line4',
        '████████████████████████████████████████████ 834 line5',
    ]
    assert res == expected
def twitteruser(account):
    """Scrape a Twitter profile page and return a formatted stats card.

    account: the Twitter handle to look up.
    Returns an ANSI-coloured string containing a table of profile
    fields plus an ascii graph of tweet/following/follower/like counts,
    or an error string when the page cannot be parsed.

    Bug fix: the old comma check
    ``if ',' in tweets.split()[0] or following.split()[0] or ...``
    parsed as ``(',' in x) or truthy-string or ...`` and was therefore
    always true; commas are now stripped unconditionally (a no-op when
    absent), which preserves the original intent.
    """
    d = webdriver.Remote(
        command_executor="http://127.6.138.129:15002",
        desired_capabilities=webdriver.DesiredCapabilities.PHANTOMJS)
    d.get("https://twitter.com/" + account)
    soup = BeautifulSoup(d.page_source, "html.parser")
    try:
        try:
            name = soup.find('a', {'class': 'ProfileHeaderCard-nameLink u-textInheritColor js-nav'}).text
            desc_tag = soup.find('span', {'class': 'js-display-url'})
            desc = desc_tag.text if desc_tag else 'Too lazy to write'
            loc_tag = soup.find('span', {'class': 'ProfileHeaderCard-locationText u-dir'})
            location = loc_tag.text if loc_tag else 'Probably in Mars'
            join_tag = soup.find('span', {'class': 'ProfileHeaderCard-joinDateText js-tooltip u-dir'})
            joined = join_tag.text if join_tag else 'The beginning of time'
            tweets = soup.find('li', {'class': 'ProfileNav-item--tweets'}).find('span', {"class": "ProfileNav-value"}).text
            following = soup.find('li', {'class': 'ProfileNav-item--following'}).find('span', {"class": "ProfileNav-value"}).text
            followers = soup.find('li', {'class': 'ProfileNav-item--followers'}).find('span', {"class": "ProfileNav-value"}).text
            likes = soup.find('li', {'class': 'ProfileNav-item--favorites'}).find('span', {"class": "ProfileNav-value"}).text
        except:  # noqa: E722 -- page layout changed; keep the original broad catch
            return 'This is new, note the account and send it to me @m4d_d3v'
        # Strip thousands separators before int() conversion below.
        tweets = tweets.replace(',', '')
        following = following.replace(',', '')
        followers = followers.replace(',', '')
        likes = likes.replace(',', '')
        table = [['Name', name], ['Description', desc],
                 ['Location', location], ['Joined', joined]]
        chart = [('tweets', int(tweets.split()[0])),
                 ('following', int(following.split()[0])),
                 ('followers', int(followers.split()[0])),
                 ('likes', int(likes.split()[0]))]
        gr = Pyasciigraph()
        show = gr.graph('Stats', chart)
        return "\033[1;34m%s\n\n%s\n%s\n%s\n%s\n%s\n\033[m\n" % (
            tabulate(table), show[0], show[2], show[3], show[4], show[5])
    except:  # noqa: E722 -- any other failure reads as "user not found"
        return "\033[1;31mCould not find user\033[m\n"
def test_alternate_graphsymbol(self):
    """A custom graphsymbol ('*') must replace the default block char."""
    test = [('long_labe☭', 423), ('sl', 1234), ('line3', 531),
            ('line4', 200), ('line5', 834)]
    graph = Pyasciigraph(graphsymbol='*')
    res = graph.graph('☭test print', test)
    expected = [
        '☭test print',
        '###############################################################################',
        '******************** 423 long_labe☭',
        '************************************************************* 1234 sl ',
        '************************** 531 line3 ',
        '********* 200 line4 ',
        '***************************************** 834 line5 ',
    ]
    assert res == expected
def test_unsorted_default_params(self):
    """With default parameters, rows keep their input order (unsorted)
    and the default block symbol is used."""
    test = [('long_labe☭', 423), ('sl', 1234), ('line3', 531),
            ('line4', 200), ('line5', 834)]
    graph = Pyasciigraph()
    res = graph.graph('☭test print', test)
    expected = [
        '☭test print',
        '###############################################################################',
        '████████████████████ 423 long_labe☭',
        '█████████████████████████████████████████████████████████████ 1234 sl ',
        '██████████████████████████ 531 line3 ',
        '█████████ 200 line4 ',
        '█████████████████████████████████████████ 834 line5 ',
    ]
    assert res == expected
def test_float_format(self):
    """float_format must control the rendering of values (2 decimals,
    thousands separator), including integer inputs."""
    test = [('long_labe☭', 423.197), ('sl', 1234.12341), ('line3', 531.11),
            ('line4', 200), ('line5', 834)]
    graph = Pyasciigraph(float_format='{0:,.2f}')
    res = graph.graph('☭test print', test)
    expected = [
        '☭test print',
        '###############################################################################',
        '███████████████████ 423.20 long_labe☭',
        '█████████████████████████████████████████████████████████ 1,234.12 sl ',
        '████████████████████████ 531.11 line3 ',
        '█████████ 200.00 line4 ',
        '██████████████████████████████████████ 834.00 line5 ',
    ]
    assert res == expected
def test_zeros(self):
    """All-zero values must render empty bars without crashing (no
    division-by-zero when scaling)."""
    test = [('long_labe☭', 0), ('sl', 0), ('line3', 0),
            ('line4', 0), ('line5', 0)]
    graph = Pyasciigraph()
    res = graph.graph('☭test print', test)
    expected = [
        '☭test print',
        '###############################################################################',
        ' 0 long_labe☭',
        ' 0 sl ',
        ' 0 line3 ',
        ' 0 line4 ',
        ' 0 line5 ',
    ]
    assert res == expected
def getPasswords(): print "Unique passwords: " + executeQuery("db.session.distinct('auth_attempts.password').length") if verbose or veryVerbose: passwordList = executeQuery("db.session.aggregate([{\$unwind:'\$auth_attempts'},{\$group:{_id:'\$auth_attempts.password','count':{\$sum:1}}},{\$sort:{count:-1}}]).forEach(function(x){printjson(x)})").split('\n') for pair in passwordList: match = re.search(r'"_id" : "(.*)", "count" : (\d+) }',pair) if match: countByPassword[match.group(1)] = int(match.group(2)) print figlet_format('Passwords', font='small') graph = Pyasciigraph() for line in graph.graph('', sorted(countByPassword.items(), key=operator.itemgetter(1), reverse=True)): print(line) print else: passwordList = executeQuery("db.session.aggregate([{\$unwind:'\$auth_attempts'},{\$group:{_id:'\$auth_attempts.password','count':{\$sum:1}}},{\$sort:{count:-1}},{\$limit:10}]).forEach(function(x){printjson(x)})").split('\n') for pair in passwordList: match = re.search(r'"_id" : "(.*)", "count" : (\d+) }',pair) if match: countByPassword[match.group(1)] = int(match.group(2)) print figlet_format('Passwords ( Top 10 )', font='small') graph = Pyasciigraph() for line in graph.graph('', sorted(countByPassword.items(), key=operator.itemgetter(1), reverse=True)): print(line) print