def print_table(data):
    """Print the table of detected SSIDs and their data to screen.

    Positional arguments:
    data -- list of dictionaries.
    """
    table = AsciiTable([COLUMNS])
    # Numeric-ish columns (channel, frequency, signal) read better right-aligned.
    for col in (2, 3, 4):
        table.justify_columns[col] = 'right'

    rows = []
    for entry in data:
        row = [str(entry.get(key, '')) for key in
               ('ssid', 'security', 'channel', 'frequency', 'signal', 'bssid')]
        # SSIDs may contain embedded NULs; strip them before display.
        row[0] = row[0].replace('\0', '')
        if row[3]:
            row[3] += ' MHz'
        if row[4]:
            row[4] += ' dBm'
        rows.append(row)

    # Sort on the column named by --key (case-insensitive header match).
    key_index = [c.lower() for c in COLUMNS].index(OPTIONS['--key'].lower())
    rows.sort(key=lambda r: r[key_index], reverse=OPTIONS['--reverse'])
    table.table_data.extend(rows)
    print(table.table)
def write_out_results(self):
    """Print the benchmark-result table and per-status-code table to stdout."""
    stats = self._calc_stats()
    rps = stats.rps
    results_table_data = [
        ['Item', 'Value', 'Info'],
        ['Successful calls', '%r' % stats.count, '测试成功的连接数'],
        ['Total time', '%.4f' % stats.total_time, '总耗时'],
        ['Average Time', '%.4f' % stats.avg, '每个连接的平均耗时'],
        # Typo fix: 'Fatest' -> 'Fastest'.
        ['Fastest Time', '%.4f' % stats.min, '最小耗时'],
        ['Slowest Time', '%.4f' % stats.max, '最大耗时'],
        # Format fix: '%4f' meant field-width 4 (still 6 decimals); '%.4f'
        # matches the 4-decimal precision of the other timing rows.
        ['Amplitude', '%.4f' % stats.amp, '最大耗时和最小耗时之差'],
        # Typo fix: 'Stand deviation' -> 'Standard deviation'.
        ['Standard deviation', '%.6f' % stats.stdev, '耗时标准差'],
        ['Request Per Second', '%d' % rps, '每秒的访问量'],
    ]
    # Typo fix: table title 'Rsults' -> 'Results'.
    results_table = AsciiTable(results_table_data, 'Results')
    results_table.inner_row_border = True
    print('\n')
    print(results_table.table)
    print('\r\r\n\n')

    status_table_data = [['Status Code', 'Items']]
    for code, items in self.status_code_counter.items():
        status_table_data.append(['%d' % code, '%d' % len(items)])
    # Typo fix: table title 'StausCode' -> 'StatusCode'.
    status_table = AsciiTable(status_table_data, 'StatusCode')
    print(status_table.table)
def search(substring, include_deleted, include_pending, include_external, include_system, **criteria):
    """Searches users matching some criteria"""
    # Only these four criteria are accepted; anything else is a programming error.
    assert set(criteria.viewkeys()) == {'first_name', 'last_name', 'email', 'affiliation'}
    # Drop criteria the caller left unset so they do not constrain the search.
    criteria = {k: v for k, v in criteria.viewitems() if v is not None}
    # `substring` toggles substring matching; without it, matches must be exact.
    res = search_users(exact=(not substring), include_deleted=include_deleted,
                       include_pending=include_pending, external=include_external,
                       allow_system_user=include_system, **criteria)
    if not res:
        print(cformat('%{yellow}No results found'))
        return
    elif len(res) > 100:
        # Large result sets need explicit confirmation before dumping to screen.
        click.confirm('{} results found. Show them anyway?'.format(len(res)), abort=True)
    # Local users and external identities are listed in separate tables,
    # each sorted case-insensitively by name then email.
    users = sorted((u for u in res if isinstance(u, User)),
                   key=lambda x: (x.first_name.lower(), x.last_name.lower(), x.email))
    externals = sorted((ii for ii in res if isinstance(ii, IdentityInfo)),
                       key=lambda x: (_safe_lower(x.data.get('first_name')),
                                      _safe_lower(x.data.get('last_name')),
                                      _safe_lower(x.data['email'])))
    if users:
        table_data = [['ID', 'First Name', 'Last Name', 'Email', 'Affiliation']]
        for user in users:
            table_data.append([unicode(user.id), user.first_name, user.last_name,
                               user.email, user.affiliation])
        table = AsciiTable(table_data, cformat('%{white!}Users%{reset}'))
        table.justify_columns[0] = 'right'
        print(table.table)
    if externals:
        if users:
            # Blank line between the two tables.
            print()
        table_data = [['First Name', 'Last Name', 'Email', 'Affiliation', 'Source', 'Identifier']]
        for ii in externals:
            data = ii.data
            table_data.append([data.get('first_name', ''), data.get('last_name', ''),
                               data['email'], data.get('affiliation', '-'),
                               ii.provider.name, ii.identifier])
        table = AsciiTable(table_data, cformat('%{white!}Externals%{reset}'))
        print(table.table)
def list(self, name: str, running: bool, available: bool):
    """
    Get running/available listeners

    Usage: list [<name>] [--running] [--available] [-h]

    Arguments:
        name  filter by listener name

    Options:
        -h, --help       Show dis
        -r, --running    List running listeners
        -a, --available  List available listeners
    """
    # NOTE(review): the docstring above is likely parsed as a CLI usage spec
    # (docopt-style) — its wording is kept intact.
    available_rows = [["Name", "Description"]]
    available_rows.extend([plugin.name, plugin.description] for plugin in self.loaded)
    available_table = AsciiTable(available_rows, title="Available")
    available_table.inner_row_border = True
    print(available_table.table)

    running_rows = [["Type", "Name", "URL"]]
    running_rows.extend(
        [listener.name, listener["Name"], f"https://{listener['BindIP']}:{listener['Port']}"]
        for listener in self.listeners
    )
    running_table = AsciiTable(running_rows, title="Running")
    running_table.inner_row_border = True
    print(running_table.table)
def order_summary(entry, exit, commission):
    """Print an ASCII summary of a planned order: entry line, targets,
    projected profit, stop loss and risk/reward ratio.

    entry/exit are order objects (entry must support .as_dict(), .price;
    exit supplies .target_price/.stop_price); commission is passed through
    to the P&L helpers.
    """
    data = []
    # Entry row: stop-limit if a stop price is set, plain limit otherwise.
    if 'stop_price' in entry:
        data.append(
            ['%(action)s %(quantity)s %(ticker)s STOP $%(stop_price).2f LIMIT' % entry.as_dict(),
             pf(entry.price),
             Color('{cyan}%s{/cyan}' % pf(cost(entry, exit, commission)))]
        )
    else:
        data.append(
            ['%(action)s %(quantity)s %(ticker)s LIMIT' % entry.as_dict(),
             pf(entry.price),
             Color('{cyan}%s{/cyan}' % pf(cost(entry, exit, commission)))]
        )
    data.extend([
        ['50% Target', pf(half_target_price(entry, exit)),
         '+%s' % pf(half_target_profit(entry, exit, commission))],
        ['Target', pf(exit.target_price),
         '+%s' % pf(target_profit(entry, exit, commission))],
        ['Profit', '',
         Color('{green}+%s{/green}' % pf(total_profit(entry, exit, commission)))],
        # BUG FIX: the closing tag was '{/red}' while the opening tag was
        # '{hired}' — colorclass tags must match to close the color.
        ['Stop loss', pf(exit.stop_price),
         Color('{hired}-%s{/hired}' % pf(risk(entry, exit, commission)))],
        ['Risk/Reward', '',
         Color('{%(color)s}%(risk_reward).1f to 1{/%(color)s}' % {
             'risk_reward': risk_reward(entry, exit, commission),
             # Green when the reward is at least 3x the risk.
             'color': 'green' if risk_reward(entry, exit, commission) >= 3 else 'hired'
         })],
    ])
    table = AsciiTable(data)
    table.inner_column_border = False
    print(table.table)
def home_office(ctx, year=CURRENT_YEAR):
    """ Show home office expenses. """
    ss = open_spreadsheet('Home Office %s' % year)
    worksheet = ss.worksheet('Monthly fees')

    # Accumulate each expense category across all monthly rows.
    categories = defaultdict(Decimal)
    direct_columns = ('hoa assessments', 'homeowners insurance', 'mortgage')
    for row in worksheet.get_all_records():
        for column in direct_columns:
            categories[column] += get_decimal(row[column])
        categories['utilities (gas & electric)'] += (
            get_decimal(row['electric']) + get_decimal(row['gas']))

    total = sum(categories.values())
    data = [(name.capitalize(), amount) for name, amount in categories.items()]
    data += [
        (f'Total for {year}', total),
        (f'Office rent for {year}', total / 4),
        ('Repairs & maintenance', get_rm_total(ss)),
    ]

    table = AsciiTable(data, 'Home office')
    table.inner_heading_row_border = False
    print(table.table)
def test_single_line():
    """Test single-lined cells."""
    table_data = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
        ['Watermelon', 'green'],
        [],
    ]
    table = AsciiTable(table_data, 'Example')
    table.inner_footing_row_border = True
    # One justification per column, assigned in a single pass.
    for column, justification in enumerate(('left', 'center', 'right')):
        table.justify_columns[column] = justification

    expected = '\n'.join((
        '+Example-----+-------+-----------+',
        '| Name       | Color |      Type |',
        '+------------+-------+-----------+',
        '| Avocado    | green |       nut |',
        '| Tomato     |  red  |     fruit |',
        '| Lettuce    | green | vegetable |',
        '| Watermelon | green |           |',
        '+------------+-------+-----------+',
        '|            |       |           |',
        '+------------+-------+-----------+',
    ))
    assert table.table == expected
def test_attributes():
    """Test with different attributes."""
    table_data = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
    ]
    table = AsciiTable(table_data)

    def assert_width(expected):
        # Rendered width and reported table_width must both agree.
        assert expected == max(len(r) for r in table.table.splitlines())
        assert expected == table.table_width

    assert_width(31)

    table.outer_border = False
    assert_width(29)

    table.inner_column_border = False
    assert_width(27)

    table.padding_left = 0
    assert_width(24)

    table.padding_right = 0
    assert_width(21)
def run(self):
    """Interactive command loop: show the banner, then read commands until 'exit'.

    Returns 0 when the user exits.
    """
    self.print_banner()
    self.print_help()
    while True:
        command = self.input()
        if command == 'addcert':
            count = self.manager.command_addcert()
            print "Successfully added %s certificates" % count
        elif command == 'settings':
            print SETTINGS
        elif command == 'help':
            self.print_banner()
            self.print_help()
        elif command == 'report':
            # Weekly report rendered as a borderless ASCII table.
            rows, week_start, week_end, total = self.manager.command_report()
            table = AsciiTable(rows, 'Certificates obtained %s-%s' % (week_start, week_end))
            table.outer_border = False
            print table.table
            print "\nTotal certificates obtained: %s" % total
        elif command == 'delete':
            self.manager.command_delete()
        elif command == 'exit':
            return 0
        else:
            # Unknown commands are silently ignored.
            pass
async def live(network, channel, message):
    """Announce the currently-live Twitch streams to `channel` as an ASCII table.

    Fetches live streams from the Twitch API, computes each stream's uptime,
    and sends one centered table (Streamer / Game / Viewers / Uptime).
    """
    streams = await network.application.TwitchAPI.live()
    headers = ['Streamer', 'Game', 'Viewers', 'Uptime']
    out = [headers, ]
    now = datetime.datetime.utcnow()
    for stream in streams:
        started = datetime.datetime.strptime(stream['created_at'], '%Y-%m-%dT%H:%M:%SZ')
        # BUG FIX: timedelta.seconds only counts the sub-day remainder, so any
        # stream live for more than 24h reported a wrapped uptime;
        # total_seconds() counts the full elapsed time.
        elapsed = int((now - started).total_seconds())
        hours = elapsed // 3600
        minutes = (elapsed // 60) % 60
        # (Removed a dead sentence-format string that was built here and
        # immediately discarded.)
        out.append([
            stream['channel']['display_name'],
            stream['game'],
            str(stream['viewers']),
            '{}h{}m'.format(hours, minutes),
        ])
    table = AsciiTable(out)
    for i in range(len(out[0])):
        table.justify_columns[i] = 'center'
    await network.send_message(channel, '\n`{}`'.format(table.table))
def view_services(filterquery=None):
    """Prints out list of services and its relevant information"""
    table = []
    table.append(["Service Name", "Stacks", "Containers", "Parent S", "Child S", "Endpoints"])
    # An explicit filter query overrides the default "all services" listing.
    if filterquery:
        services = filterquery.all()
        #services = session.query(filterquery).all()
    else:
        services = session.query(Service).all()
    if not services:
        print "No services met the search"
        return
    for service in services:
        # get_state() bundles the service's name, stacks, parent/child links
        # and endpoints into one dict.
        state = service.get_state()
        parents = [p['parent'] for p in state['parent']]
        children = [c['child'] for c in state['childs']]
        cs = []
        for stack in state['stacks']:
            for i, container in enumerate(stack['container']):
                # Show "name:version:endpoint" when the stack pointer maps
                # this slot to an endpoint, otherwise just "name:version".
                endpoint = service.tree_on_stack_pointer(i)
                if endpoint:
                    cs.append("%s:%s:%s" % (container['name'], container['version'], endpoint.name))
                else:
                    cs.append("%s:%s" % (container['name'], container['version']))
        #cs.extend(["%s:%s" % (c['name'],c['version']) for c in stack['container']])
        # Multi-valued cells are newline-joined so each value gets its own line.
        table.append([str(state['name']),
                      "\n".join([s['name'] for s in state['stacks'] if s]),
                      str("\n".join(cs)),
                      "\n".join(parents),
                      "\n".join(children),
                      "\n".join(state['endpoints'])])
    t = AsciiTable(table)
    t.inner_row_border = True
    print t.table
def print_table(self, data, title=None):
    """Print *data* as an ASCII table, optionally titled, framed by blank lines."""
    print ""
    table = AsciiTable(data)
    if title:
        table.title = title
    print table.table
    print ""
def table(header, rows):
    """Render `header` + `rows` as an ASCII table string, wrapping the widest
    column when the table would not fit the terminal.

    Returns the rendered table string, or None when terminaltables is missing.
    """
    if not HAVE_TERMTAB:
        print_error("Missing dependency, install terminaltables (`pip install terminaltables`)")
        return
    # TODO: Refactor this function, it is some serious ugly code.
    content = [header] + rows
    # Make sure everything is string
    try:
        # Python 2 path: `unicode` exists.
        content = [[a.replace('\t', '  ') for a in list(map(unicode, l))] for l in content]
    except:
        # Python3 way of doing it:
        content = [[a.replace('\t', '  ') for a in list(map(str, l))] for l in content]
    t = AsciiTable(content)
    if not t.ok:
        # Table is wider than the terminal: wrap the longest column down to
        # the maximum width terminaltables says it may occupy.
        longest_col = t.column_widths.index(max(t.column_widths))
        max_length_col = t.column_max_width(longest_col)
        if max_length_col > 0:
            for i, content in enumerate(t.table_data):
                if len(content[longest_col]) > max_length_col:
                    temp = ''
                    # Wrap each existing line individually so embedded
                    # newlines in the cell are preserved.
                    for l in content[longest_col].splitlines():
                        if len(l) > max_length_col:
                            temp += '\n'.join(textwrap.wrap(l, max_length_col)) + '\n'
                        else:
                            temp += l + '\n'
                    content[longest_col] = temp.strip()
                    t.table_data[i] = content
    return t.table
def print_download_item(self, item, ascii_table):
    """Print one download item (podcast title + episode summary).

    When `ascii_table` is given, append a row to it and print only the new
    row; otherwise build a fresh two-column table. Returns the table so the
    caller can pass it back in for the next item.
    """
    # NOTE(review): assumes get_max_dimensions returns per-column widths in
    # dimensions[0] (title width at [0][0], summary width at [0][1]) — confirm.
    dimensions = get_max_dimensions(ascii_table)
    title = ""
    for line in textwrap.wrap(
            item.podcast.title, dimensions[0][0],
            initial_indent=' ', subsequent_indent=' '):
        title += line + "\n"
    summ = ""
    for line in textwrap.wrap(
            item.summary, dimensions[0][1],
            initial_indent=' ', subsequent_indent=' '):
        summ += line + "\n"
    if ascii_table:
        # Reuse the existing table; print just the freshly appended row.
        ascii_table.table_data.append([title, summ])
        print(ascii_table_last(ascii_table))
        return ascii_table
    else:
        # First item: build a new table with headers and print it whole.
        table_headers = [['title', 'summary']]
        table_data = [[title, summ]]
        ascii_table = AsciiTable(table_headers + table_data)
        ascii_table.inner_row_border = True
        print(ascii_table.table)
        return ascii_table
def print_table(table_data):
    """Print *table_data* as an ASCII table with inner row borders; rating
    tables get their rating column centered."""
    table = AsciiTable(table_data)
    table.inner_row_border = True
    rating_headers = ([['TITLE', 'IMDB RATING']], [['TITLE', 'TOMATO RATING']])
    if table_data[:1] in rating_headers:
        table.justify_columns[1] = 'center'
    print("\n")
    print(table.table)
def show_license(self):
    """Print the Metascan license details as a headerless two-column table."""
    licenses = self.metascan.get_license()
    details = []
    # The license payload is a flat JSON object; list each key/value pair.
    for lic in licenses.json().iteritems():
        details.append([str(lic[0]), str(lic[1])])
    table = AsciiTable(details, "Licenses")
    table.inner_heading_row_border = False
    print table.table
def show_workflows(self):
    """Print the names of the configured Metascan workflows as a table."""
    details = []
    workflows = self.metascan.get_workflows()
    # The workflows payload is a JSON array of objects; show each name.
    for wf in workflows.json():
        details.append([wf["name"]])
    table = AsciiTable(details, "Workflows")
    table.inner_heading_row_border = False
    print table.table
def frameTable():
    """Print the global frame table: frame#, owning process tag and page#.

    Reads the module-level `zipFrame`, whose entries carry the process tag at
    [1][0] and the page number at [1][1]. Each column is one multi-line cell.
    """
    frameTableP1 = [
        [],
        ['Frame#', 'Process#', 'Page#'],
        [('\n'.join(map(str, range(16)))),
         ('\n'.join(map(str, [j[1][0] for j in zipFrame]))),
         (('\n'.join(map(str, [l[1][1] for l in zipFrame]))))]
    ]
    table1 = AsciiTable(frameTableP1)
    table1.title = '--------FRAME TABLE'
    print table1.table
def pTable1():
    """Print process P1's page table: page numbers vs. allocated frame indices.

    A frame belongs to P1 when its `zipFrame` tag (entry[1][0]) is 'P1:'.
    """
    pageTableP1 = [
        [],
        ['Page #', 'Frame#'],
        [('\n'.join(map(str, newP1))),
         ('\n'.join(map(str, ([i for i, c in enumerate(zipFrame) if c[1][0] == 'P1:']))))]
    ]
    table = AsciiTable(pageTableP1)
    table.title = '---P1 Page Table'
    print table.table
def print_matches(matches):
    """Print columns 2-5 of each match row as a bordered, headerless table."""
    rows = [[str(match[2]), str(match[3]), str(match[4]), str(match[5])]
            for match in matches]
    table = AsciiTable(rows)
    table.inner_heading_row_border = False
    table.inner_row_border = True
    print(table.table)
def pTable2():
    """Print process P2's page table: page numbers vs. allocated frame indices.

    A frame belongs to P2 when its `zipFrame` tag (entry[1][0]) is 'P2:'.
    """
    pageTableP2 = [
        [],
        ['Page #', 'Frame#'],
        [('\n'.join(map(str, newP2))),
         ('\n'.join(map(str, ([i for i, c in enumerate(zipFrame) if c[1][0] == 'P2:']))))]
    ]
    table2 = AsciiTable(pageTableP2)
    table2.title = '---P2 Page Table'
    print table2.table
def pTable3():
    """Print process P3's page table: page numbers vs. allocated frame indices.

    A frame belongs to P3 when its `zipFrame` tag (entry[1][0]) is 'P3:'.
    """
    pageTableP3 = [
        [],
        ['Page #', 'Frame#'],
        [('\n'.join(map(str, newP3))),
         ('\n'.join(map(str, ([i for i, c in enumerate(zipFrame) if c[1][0] == 'P3:']))))]
    ]
    table3 = AsciiTable(pageTableP3)
    table3.title = '---P3 Page Table'
    print table3.table
def pTable4(): ####process table p4 pageTableP4 = [[], ['Page #', 'Frame#'], [('\n'.join(map(str,newP4))), ('\n'.join(map(str,([i for i,c in enumerate(zipFrame) if c[1][0]=='P4:' ]))))] ] table4 = AsciiTable(pageTableP4) table4.title='---4 Page Table' print table4.table
def pTable5():
    """Print process P5's page table: page numbers vs. allocated frame indices.

    A frame belongs to P5 when its `zipFrame` tag (entry[1][0]) is 'P5:'.
    """
    pageTableP5 = [
        [],
        ['Page #', 'Frame#'],
        [('\n'.join(map(str, newP5))),
         ('\n'.join(map(str, ([i for i, c in enumerate(zipFrame) if c[1][0] == 'P5:']))))]
    ]
    table5 = AsciiTable(pageTableP5)
    table5.title = '---P5 Page Table'
    print table5.table
def __report_summary_labels(self, cumulative):
    """Log per-label request statistics as a table (pretty box-drawing table
    on a TTY, plain ASCII otherwise)."""
    rows = [("label", "status", "succ", "avg_rt", "error")]
    for sample_label in sorted(cumulative.keys()):
        # The empty label is the overall aggregate; skip it here.
        if sample_label != "":
            rows.append(self.__get_sample_element(cumulative[sample_label], sample_label))
    table_cls = SingleTable if sys.stdout.isatty() else AsciiTable
    table = table_cls(rows)
    table.justify_columns = {0: "left", 1: "center", 2: "right", 3: "right", 4: "left"}
    self.log.info("Request label stats:\n%s", table.table)
def output(buf, dowrap=False):
    """Print pack rows under a fixed Bot/Pack#/Size/File header; on a TTY,
    optionally wrap the File column to the table's available width."""
    rows = [["Bot", "Pack#", "Size", "File"]] + buf
    table = AsciiTable(rows)
    table.inner_column_border = False
    table.outer_border = False
    if dowrap and sys.stdout.isatty():
        max_width = table.column_max_width(3)
        for row in rows:
            if len(row[3]) > max_width:
                row[3] = "\n".join(wrap(row[3], max_width))
    print(table.table)
    sys.stdout.flush()
def test_title():
    """Test that table title shows up correctly."""
    table_data = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
    ]
    table = AsciiTable(table_data, 'Foods')

    # Short title: embedded into the top border, padded out with dashes.
    expected = dedent("""\
        +Foods----+-------+-----------+
        | Name    | Color | Type      |
        +---------+-------+-----------+
        | Avocado | green | nut       |
        | Tomato  | red   | fruit     |
        | Lettuce | green | vegetable |
        +---------+-------+-----------+""")
    assert expected == table.table

    # Longer title: consumes the border up to an inner '+' junction.
    table.title = 'Foooooooooooooods'
    expected = dedent("""\
        +Foooooooooooooods+-----------+
        | Name    | Color | Type      |
        +---------+-------+-----------+
        | Avocado | green | nut       |
        | Tomato  | red   | fruit     |
        | Lettuce | green | vegetable |
        +---------+-------+-----------+""")
    assert expected == table.table

    # Title exactly as wide as the border interior: fills the whole top border.
    table.title = 'Foooooooooooooodsssssssssssss'
    expected = dedent("""\
        +Foooooooooooooodsssssssssssss+
        | Name    | Color | Type      |
        +---------+-------+-----------+
        | Avocado | green | nut       |
        | Tomato  | red   | fruit     |
        | Lettuce | green | vegetable |
        +---------+-------+-----------+""")
    assert expected == table.table

    # Title one character too wide: dropped entirely, plain border shown.
    table.title = 'Foooooooooooooodssssssssssssss'
    expected = dedent("""\
        +---------+-------+-----------+
        | Name    | Color | Type      |
        +---------+-------+-----------+
        | Avocado | green | nut       |
        | Tomato  | red   | fruit     |
        | Lettuce | green | vegetable |
        +---------+-------+-----------+""")
    assert expected == table.table
def view_endpoints():
    """Lists the endpoints defined

    Arguments:
    *sort by service
    *sort by stage
    """
    table_data = [['Endpoint name', 'ip', 'pubport', 'url', 'mainservice', 'stackpointer', 'tree']]
    for endpoint in session.query(Endpoint).all():
        # Render the endpoint's routing tree as the last column.
        subtree = view_endpoint_tree(endpoint)
        table_data.append([str(endpoint.name), str(endpoint.ip), str(endpoint.pubport),
                           str(endpoint.url), str(endpoint.service.name),
                           str(endpoint.stackpointer), subtree])
    table = AsciiTable(table_data)
    table.inner_row_border = True
    print table.table
def test_multi_line():
    """Test multi-lined cells."""
    table_data = [
        ['Show', 'Characters'],
        ['Rugrats', 'Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles,\nDil Pickles'],
        ['South Park', 'Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick']
    ]
    table = AsciiTable(table_data)

    # Test defaults.
    actual = table.table
    expected = (
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Show       | Characters                                                                          |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |\n'
        '|            | Dil Pickles                                                                         |\n'
        '| South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |\n'
        '+------------+-------------------------------------------------------------------------------------+'
    )
    assert actual == expected

    # Test inner row border.
    table.inner_row_border = True
    actual = table.table
    expected = (
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Show       | Characters                                                                          |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |\n'
        '|            | Dil Pickles                                                                         |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| South Park | Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick                          |\n'
        '+------------+-------------------------------------------------------------------------------------+'
    )
    assert actual == expected

    # Justify right.
    table.justify_columns = {1: 'right'}
    actual = table.table
    expected = (
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Show       |                                                                          Characters |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| Rugrats    | Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles, |\n'
        '|            |                                                                         Dil Pickles |\n'
        '+------------+-------------------------------------------------------------------------------------+\n'
        '| South Park |                          Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick |\n'
        '+------------+-------------------------------------------------------------------------------------+'
    )
    assert actual == expected
def view_stack(service, stackname=None): """View the stack container version and position and tree points Arguments: service: service object stackname: Name of stack if only one stack should be viewed """ table_data = [['Stackname', 'host', 'image', 'conatiners']] for stack in service.stacks: table_data.append([str(stack.name), str(stack.host), str(stack.image), "\n".join([c.name for c in stack.container])]) print table_data table = AsciiTable(table_data) table.inner_row_border = True print table.table
def OutputTable(self, msg, select_tables):
    """Append *msg* to the rolling message buffer and redraw the console.

    select_tables == 1: print just the message table.
    select_tables == 2: refresh account info from the server, clear the
    screen, print both tables and (when not on Windows) run an interactive
    single-key command loop ([a] apps, [m] miner, [q] quit).
    """
    # Rolling window: bump the counter; once it wraps, trim the buffer so it
    # never grows past 6 visible rows.
    if self.numberLoop < 6:
        self.numberLoop = self.numberLoop + 1
    else:
        self.numberLoop = 0
        if len(self.all_data) > 6:
            del self.all_data[-6]
    self.all_data.append([msg])
    data = self.all_data
    if select_tables == 1:
        table = AsciiTable(data)
        print(table.table)
    elif select_tables == 2:
        try:
            # Refresh the account snapshot from the game server.
            self.account_info = self.requestStringNowait("update.php", accesstoken=self.Configuration["accessToken"])
            self.exploits = int(self.account_info["exploits"])
            # XP progress toward the next level, as a fraction.
            progress = round(int(self.account_info["exp"]))/round(int(self.account_info["expreq"]))
            account_information = [["your account information", "update information"],
                                   ["{0}: {1}\n{2}: {3}\n{4}: {5}\n{6}: {7}\n{8}: {9}\n{10}: {11}".format(
                                       "Your exploits ", self.exploits,
                                       "Your spam ", self.account_info["spam"],
                                       "Your network speed ", self.account_info["inet"],
                                       "Your money ", self.account_info["money"],
                                       "Your IP ", self.account_info["ipaddress"],
                                       "Your netcoins ", self.account_info["netcoins"]),
                                    "{}: {}\n{}: {}\n{}: {}\n{}: {}\n{}: {}, XP({}%)".format(
                                       "Your SDK ", self.account_info["sdk"],
                                       "Your Firewall ", self.account_info["fw"],
                                       "Your Antivirus ", self.account_info["av"],
                                       "Your BruteForce ", self.account_info["brute"],
                                       "Your level ", self.account_info["level"],
                                       round(progress*100, 1))]]
        except KeyError:
            # Server reply missing expected keys: show an error table and bail.
            account_information = [["your account information", "update information"], ["Error", "Error"]]
            sys.exit()
        table1 = SingleTable(data)
        table2 = SingleTable(account_information)
        time.sleep(0.3)
        # Clear the screen: ANSI escape on Linux, `cls` elsewhere.
        if self.platform == "Linux":
            print("\033[H\033[J")
        else:
            os.system('cls')
        req_version = (3, 0)
        cur_version = sys.version_info
        # for windows Try to print tables else pass python 2
        try:
            print(table1.table)
            print(table2.table)
            if windows is False:
                sys.stdout.write("""\nCMD: [m] Get Miner info [a] Get All applications [q] Quit Program Waiting for user input : """)
                # raw_mode lets us read single keypresses without Enter.
                with raw_mode(sys.stdin):
                    try:
                        if cur_version <= req_version:
                            # Python 2 branch of the key loop.
                            while True:
                                ch = sys.stdin.read(1)
                                if ch == "a":
                                    p = Player(self)
                                    # NOTE(review): getTask is fetched but never used — confirm.
                                    getTask = self.requestString("tasks.php", accesstoken=self.Configuration["accessToken"])
                                    sdk = p.getHelperApplication()["SDK"]["level"]
                                    ipsp = p.getHelperApplication()["IPSP"]["level"]
                                    bp = p.getHelperApplication()["BP"]["level"]
                                    brute = p.getHelperApplication()["BRUTE"]["level"]
                                    spam = p.getHelperApplication()["SPAM"]["level"]
                                    fw = p.getHelperApplication()["FW"]["level"]
                                    av = p.getHelperApplication()["AV"]["level"]
                                    sys.stdout.write("\n \ SDK: {} \ IPSP: {}\n \ Bank Protect: {} \ BruteForce: {}\n \ SPAM: {} \ Firewall: {}\n \ Antivirus: {}".format(sdk, ipsp, bp, brute, spam, fw, av))
                                    time.sleep(1.5)
                                if ch == "m":
                                    self.minefinish = int(self.account_info['minerLeft'])
                                    sys.stdout.write("\nminerLeft {} in secondes".format(self.minefinish))
                                    sys.stdout.write("\nwaiting until {} --- {}".format(self.tuntin(self.minefinish), datetime.timedelta(seconds=(self.minefinish))))
                                    time.sleep(1)
                                if ch == "q":
                                    sys.stdout.write("\nok ok, good bye ;)\n")
                                    sys.exit()
                        else:
                            # Python 3 branch of the key loop.
                            while True:
                                ch = sys.stdin.read(1)
                                if ch == "a":
                                    p = Player(self)
                                    # NOTE(review): getTask is fetched but never used — confirm.
                                    getTask = self.requestString("tasks.php", accesstoken=self.Configuration["accessToken"])
                                    sdk = p.getHelperApplication()["SDK"]["level"]
                                    ipsp = p.getHelperApplication()["IPSP"]["level"]
                                    bp = p.getHelperApplication()["BP"]["level"]
                                    brute = p.getHelperApplication()["BRUTE"]["level"]
                                    spam = p.getHelperApplication()["SPAM"]["level"]
                                    fw = p.getHelperApplication()["FW"]["level"]
                                    av = p.getHelperApplication()["AV"]["level"]
                                    sys.stdout.write("\n \ SDK: {} \ IPSP: {}\n \ Bank Protect: {} \ BruteForce: {}\n \ SPAM: {} \ Firewall: {}\n \ Antivirus: {}".format(sdk, ipsp, bp, brute, spam, fw, av))
                                    time.sleep(1.5)
                                if str(ch) == "m":
                                    self.minefinish = int(self.account_info['minerLeft'])
                                    sys.stdout.write("\nminerLeft {} in secondes".format(self.minefinish))
                                    sys.stdout.write("\nwaiting until {} --- {}".format(self.tuntin(self.minefinish), datetime.timedelta(seconds=(self.minefinish))))
                                    time.sleep(1)
                                if ch == "q":
                                    sys.stdout.write("\nok ok, good bye ;)\n")
                                    sys.exit()
                                # NOTE(review): this break makes the py3 loop run
                                # at most one keypress per redraw — confirm intent.
                                break
                    except (KeyboardInterrupt, EOFError):
                        pass
        except IOError as e:
            pass
def dijkstra(self, source, terminus, early_stop=True, file_name="output/dijkstra_output.txt"):
    """Run Dijkstra's algorithm from `source` toward `terminus`, logging the
    vertex table after every iteration to `file_name`.

    :param source: start node; must be a key of self.node_to_neighbor
    :param terminus: target node; must appear as some node's neighbor
    :param early_stop: stop once `terminus` enters the settled set S instead
        of running |V| iterations
    :param file_name: path of the text log (one ASCII table per iteration)
    :return: False on invalid source/terminus, otherwise None
    """
    start = timer()

    # checking for invalid source or terminus
    invalid_source = True
    invalid_terminus = True
    for node in self.node_to_neighbor:
        if source == node:
            invalid_source = False
        # NOTE(review): a terminus with no incoming edges is rejected here —
        # confirm that is intended.
        if terminus in self.node_to_neighbor[node]:
            invalid_terminus = False
    if invalid_source:
        print("Error: Invalid source")
        return False
    if invalid_terminus:
        print("Error: Invalid terminus")
        return False

    # initializing set S and vertex table
    S = set()
    node_to_pred = {}
    node_to_dist = {}
    for node in self.node_set:
        node_to_pred[node] = source
        node_to_dist[node] = inf
    node_to_dist[source] = 0

    def write_table(out):
        # Dump the current pred/dist table as ASCII into the log.
        # (Reads node_to_pred/node_to_dist from the enclosing scope, so it
        # always sees the latest rebinding.)
        table = AsciiTable(generate_table_v1(self.node_set, node_to_pred, node_to_dist))
        out.write(table.table)
        out.write("\n")

    # `with` guarantees the log file is closed even if an iteration raises
    # (the original left the handle open on that path).
    with open(file_name, "w") as my_file:
        write_table(my_file)
        if early_stop:
            while terminus not in S:
                try:
                    S, node_to_pred, node_to_dist = self.dijkstra_iteration(
                        S, node_to_pred, node_to_dist)
                    write_table(my_file)
                except Exception as exc:
                    print("Error in iteration,", exc)
                    break
        else:
            # Without early stopping, Dijkstra settles one node per iteration.
            for i in range(len(self.node_set)):
                try:
                    S, node_to_pred, node_to_dist = self.dijkstra_iteration(
                        S, node_to_pred, node_to_dist)
                    write_table(my_file)
                except Exception as exc:
                    print("Error in iteration,", exc)
                    break
        time = timer() - start
        my_file.write("Took %f seconds." % time)
def generate_table_of_geoip_breakdown(traffic_report, csv_output=False):
    '''Generates a table of GeoIP breakdown.

    :param traffic_report: iterable of per-region reports exposing
        country_name, region_name, total_rx_bytes and total_tx_bytes
    :param csv_output: when True return CSV text (no Unknown/Total footer);
        otherwise return a rendered ASCII table string
    '''
    # We'll sort on transmitted data
    table_data = [['Country', 'Region', 'RX Bytes', 'TX Bytes', '% Rx', '% Tx']]

    # Group the results together
    sorted_trs = {}
    unknown_entry = None
    total_tx = 0
    total_rx = 0

    # Sort it into country and regions, accumulating grand totals as we go.
    for report in traffic_report:
        if report.country_name not in sorted_trs:
            sorted_trs[report.country_name] = []
        country_dict = sorted_trs[report.country_name]
        country_dict.append(report)
        total_tx += report.total_tx_bytes
        total_rx += report.total_rx_bytes

    tx_entry_dict = {}

    # Now generate the report
    for country in sorted_trs.keys():
        table_entry = []
        country_tx = 0
        country_rx = 0

        # Add up all the entries in the country
        for report in sorted_trs[country]:
            country_tx += report.total_tx_bytes
            country_rx += report.total_rx_bytes

        # Calculate the percentages of the grand totals (guarding div-by-zero).
        try:
            rx_percentage = ("{0:.2f}".format((country_rx / total_rx) * 100))
        except ZeroDivisionError:
            rx_percentage = "{0:.2f}".format(0)
        try:
            tx_percentage = ("{0:.2f}".format((country_tx / total_tx) * 100))
        except ZeroDivisionError:
            tx_percentage = "{0:.2f}".format(0)

        # If there's only one entry, merge them to one line
        region_field = ""
        if len(sorted_trs[country]) == 1:
            region_field = sorted_trs[country][0].region_name

        # Special Handling for the Unknown field: held back so it can be
        # appended at the bottom of the (non-CSV) table.
        if country == "Unknown":
            unknown_entry = [[country, "", country_rx, country_tx,
                              rx_percentage, tx_percentage]]
            continue

        # And now build the table: one summary row per country ...
        table_entry.append([country, region_field, country_rx, country_tx,
                            rx_percentage, tx_percentage])

        # CSV rows repeat the country per region; the pretty table leaves it blank.
        if csv_output is False:
            first_field = ""
        else:
            first_field = country

        # ... plus one detail row per region when there are several.
        if len(sorted_trs[country]) != 1:
            for report in sorted_trs[country]:
                table_entry.append([first_field, report.region_name,
                                    report.total_rx_bytes, report.total_tx_bytes,
                                    "", ""])

        # This can happen if we get two stat dicts with the same percentage
        if tx_percentage in tx_entry_dict:
            tx_entry_dict[tx_percentage] += table_entry
        else:
            tx_entry_dict[tx_percentage] = table_entry

    # Emit countries in descending order of TX percentage.
    for key in sorted(tx_entry_dict.keys(), reverse=True):
        table_data += tx_entry_dict[key]

    if csv_output is False:
        # Append the Unknown entry at the end if it exists
        if unknown_entry is not None:
            table_data += unknown_entry

        # And now the total.
        table_data.append([])
        # BUG FIX: the TX Bytes column previously repeated total_rx; it now
        # reports total_tx to match the column header.
        table_data.append(["Total", "", total_rx, total_tx, float(100), float(100)])

        # Now generate a pretty table and return it
        table = AsciiTable(table_data)
        return table.table
    else:
        # This is horrible and hacky
        csv_contents = io.StringIO()
        writer = csv.writer(csv_contents)
        for row in table_data:
            writer.writerow(row)
        return csv_contents.getvalue()
# NOTE(review): continuation of a top-level script run; sg0..sg15, date_0 and
# json_data are defined earlier in the file (outside this chunk) — confirm.
sg16 = json_data['data'][16]['sg']
sg17 = json_data['data'][17]['sg']
sg18 = json_data['data'][18]['sg']
sg19 = json_data['data'][19]['sg']
sg20 = json_data['data'][20]['sg']
sg21 = json_data['data'][21]['sg']
sg22 = json_data['data'][22]['sg']
sg23 = json_data['data'][23]['sg']
sg24 = json_data['data'][24]['sg']
# Station metadata used for the table title and the final Distance row.
station = json_data['meta']['station']['name']
dist = json_data['meta']['station']['distance']
# Table layout: date header row, then one row per hour pairing HH:00 with
# its afternoon counterpart (HH+12):00.
data = []
data.append([date_0])
data.append(['00:00', sg0, '12:00', sg12])
data.append(['01:00', sg1, '13:00', sg13])
data.append(['02:00', sg2, '14:00', sg14])
data.append(['03:00', sg3, '15:00', sg15])
data.append(['04:00', sg4, '16:00', sg16])
data.append(['05:00', sg5, '17:00', sg17])
data.append(['06:00', sg6, '18:00', sg18])
data.append(['07:00', sg7, '19:00', sg19])
data.append(['08:00', sg8, '20:00', sg20])
data.append(['09:00', sg9, '21:00', sg21])
data.append(['10:00', sg10, '22:00', sg22])
data.append(['11:00', sg11, '23:00', sg23])
# NOTE(review): sg24 is read above but never displayed — possibly next-day
# midnight; confirm intent.
data.append(["Distance", dist])
table = AsciiTable(data, title=station)
print(table.table)
def submit(self) -> 'Assignment':
    """Upload students' grades to Canvas.

    :return: The assignment object to allow for method chaining.
    :rtype: Assignment
    """
    # Print banner
    print(utils.banner(f"Submitting {self.name}"))

    # Look up the Canvas assignment (for its ID) unless already cached.
    # Simplified from a hasattr/elif-None pair that performed the same search.
    if getattr(self, 'canvas_assignment', None) is None:
        self.canvas_assignment = self._search_canvas_assignment()

    # Pull out the assignment ID
    assignment_id = self.canvas_assignment.get('id')

    # get the student IDs
    try:
        student_ids = map(lambda stu: stu.get('id'), self.course.students)
    except Exception:
        sys.exit(
            "No students found. Please run `course.get_students_from_canvas()` before collecting an assignment."
        )

    grades = self._get_grades(student_ids)

    # Set up status reporting
    submission_header = [['Student ID', 'Collection Status']]
    submission_status = []

    # for each student
    for grade in grades:
        # upload their grade
        resp = requests.put(
            url=urlparse.urljoin(
                self.course.canvas_url,
                f"/api/v1/courses/{self.course.course_id}/assignments/{assignment_id}/submissions/{grade.get('student_id')}"
            ),
            headers={
                "Authorization": f"Bearer {self.course.canvas_token}",
                "Accept": "application/json+canvas-string-ids"
            },
            json={
                "submission": {
                    "posted_grade": grade.get('score')
                }
            })
        # Record per-student success/failure for the summary table.
        if resp.status_code == 200:
            submission_status.append([
                grade.get('student_id'),
                f'{utils.color.GREEN}success{utils.color.END}'
            ])
        else:
            submission_status.append([
                grade.get('student_id'),
                f'{utils.color.RED}failure{utils.color.END}'
            ])

    table = AsciiTable(submission_header + submission_status)
    table.title = 'Assignment Submission'
    print(table.table)
    return self
def evaluate(dataset, predictions, nms_thresh, result_output_dir,
             recall_metrics=(1,5), iou_metrics=(0.1,0.3,0.5,0.7), num_chunks=5):
    """Evaluate moment-retrieval predictions with recall@K / IoU metrics.

    Args:
        dataset: dataset object exposing get_moment/get_duration/get_vid/
            get_sentence and __len__.
        predictions: list[(moments_norm tensor(num_predictions, 2),
            scores tensor(num_predictions))].
        nms_thresh (float): NMS IoU threshold for candidate suppression.
        result_output_dir (str or falsy): if truthy, per-sample results are
            pickled to ``<dir>test_results.pkl``.
        recall_metrics (tuple): K values for Rank@K (collapsed to (1,) when
            only one prediction per sample is available).
        iou_metrics (tuple): IoU thresholds.
        num_chunks (int): temporal grid size used for the de-biased
            (MTB) statistics.

    Returns:
        tensor: Rank@max(recall_metrics), IoU@max(iou_metrics) recall.
    """
    # Build the (start-chunk, end-chunk) frequency histogram of ground-truth
    # moments, used later to de-bias recall per temporal-boundary bucket.
    dataset_name = dataset.__class__.__name__
    frequency = torch.zeros((num_chunks, num_chunks))
    for idx in range(len(dataset)):
        moment = dataset.get_moment(idx) / dataset.get_duration(idx)
        moment[1] -= 1e-6  # keep end==duration inside the last chunk
        moment = (moment * num_chunks).long()
        frequency[moment[0], moment[1]] += 1

    logger = logging.getLogger("vmr.inference")
    logger.info("Performing {} evaluation (Size: {}).".format(dataset_name, len(dataset)))

    if result_output_dir:
        result_dict = {'vid': [], 'sentence': [], 'iou': [], 'pred_top5': [], 'duration': []}
    else:
        result_dict = None

    # With a single candidate per sample only Rank@1 is meaningful.
    if predictions[0][0].shape[0] == 1:
        recall_metrics = (1,)
    num_recall_metrics, num_iou_metrics = len(recall_metrics), len(iou_metrics)

    # Initialization (table headers must use the plain tuples, before the
    # conversion to tensors below).
    table = [['Rank@{},IoU@{:.1f}'.format(i,j) \
        for i in recall_metrics for j in iou_metrics]]
    table_mtb = [['Rank@{},MTBIoU@{:.1f}'.format(i,j) \
        for i in recall_metrics for j in iou_metrics]]
    recall_metrics = torch.tensor(recall_metrics)
    iou_metrics = torch.tensor(iou_metrics)
    recall_x_iou = torch.zeros(num_recall_metrics, num_iou_metrics)
    recall_x_iou_unbiased = torch.zeros(num_chunks, num_chunks, num_recall_metrics, num_iou_metrics)
    recall_x_mtbiou = torch.zeros(num_recall_metrics, num_iou_metrics)
    recall_x_miou = torch.zeros(num_recall_metrics)

    # Calculation
    for idx in tqdm(range(len(predictions))):
        result = predictions[idx]
        duration = dataset.get_duration(idx)
        gt_moment = dataset.get_moment(idx)
        gt_chunk_indices = gt_moment / duration
        gt_chunk_indices[1] -= 1e-6
        gt_chunk_indices = (gt_chunk_indices * num_chunks).long()
        candidates, scores = result[0]*duration, result[1]
        predicted_moments = nms(candidates, scores,
                                topk=recall_metrics[-1], thresh=nms_thresh)
        predicted_ious = iou(predicted_moments[:max(recall_metrics)], gt_moment)
        for i, r in enumerate(recall_metrics):
            ious_o = predicted_ious[:r]  # [r/?]
            recall_x_miou[i] += ious_o.mean()
            # Pad with zeros when NMS returned fewer than r candidates so the
            # expand below always sees exactly r rows.
            if ious_o.size(0) < r:
                padding = r - ious_o.size(0)
                ious = torch.nn.functional.pad(ious_o, (0,padding), "constant", 0)  # [r]
            else:
                ious = ious_o  # [r]
            bools = ious[:,None].expand(r, num_iou_metrics) > iou_metrics  # [r, num_iou_metrics]
            recall_x_iou[i] += bools.any(dim=0)  # [num_iou_metrics]
            recall_x_iou_unbiased[gt_chunk_indices[0],gt_chunk_indices[1],i] += bools.any(dim=0)
            if r == 1 and result_dict:
                result_dict['vid'].append(dataset.get_vid(idx))
                result_dict['sentence'].append(dataset.get_sentence(idx))
                result_dict['iou'].append(ious[0])
                result_dict['pred_top5'].append(predicted_moments)
                result_dict['duration'].append(duration)

    recall_x_iou /= len(predictions)
    recall_x_miou /= len(predictions)
    # Normalize per (start, end) bucket; empty buckets produce NaN and are
    # excluded from the MTB mean below.
    recall_x_iou_unbiased /= frequency[:,:,None,None]
    for i in range(len(recall_metrics)):
        for j in range(len(iou_metrics)):
            temp = recall_x_iou_unbiased[:,:,i,j]
            recall_x_mtbiou[i,j] = temp[~torch.isnan(temp)].mean()

    # BUG FIX: these writes used to run unconditionally, raising a TypeError
    # (`'NoneType' object does not support item assignment`) whenever
    # result_output_dir was falsy and result_dict was therefore None.
    if result_dict is not None:
        result_dict['mtbiou'] = recall_x_mtbiou
        result_dict['recall_x_iou_unbiased'] = recall_x_iou_unbiased
        result_dict['frequency'] = frequency

    # Print result in table
    # Original results
    table.append(['{:.02f}'.format(recall_x_iou[i][j]*100) \
        for i in range(num_recall_metrics) for j in range(num_iou_metrics)])
    table = AsciiTable(table)
    for i in range(num_recall_metrics*num_iou_metrics):
        table.justify_columns[i] = 'center'
    # Mean of Temporal Boundary results
    table_mtb.append(['{:.02f}'.format(recall_x_mtbiou[i][j]*100) \
        for i in range(num_recall_metrics) for j in range(num_iou_metrics)])
    table_mtb = AsciiTable(table_mtb)
    for i in range(num_recall_metrics*num_iou_metrics):
        table_mtb.justify_columns[i] = 'center'
    logger.info('\n' + table.table)
    logger.info('\n' + table_mtb.table)

    # Print result in line
    # Original results
    result_line = ['Rank@{},IoU@{:.01f}={:.02f}'.format(recall_metrics[i],iou_metrics[j],recall_x_iou[i][j]*100) \
        for i in range(num_recall_metrics) for j in range(num_iou_metrics)]
    result_line.extend(
        ['Rank@{}mIoU={:.02f}'.format(recall_metrics[i], recall_x_miou[i]*100)
            for i in range(num_recall_metrics)]
    )
    logger.info('\n' + ' '.join(result_line))
    # Mean of Temporal Boundary results
    result_line = ['Rank@{},MTBIoU@{:.01f}={:.02f}'.format(recall_metrics[i],iou_metrics[j],recall_x_mtbiou[i][j]*100) \
        for i in range(num_recall_metrics) for j in range(num_iou_metrics)]
    logger.info('\n' + ' '.join(result_line))

    # Save results
    if result_output_dir:
        with open(result_output_dir + 'test_results.pkl', 'wb') as F:  # DO NOT use join for prefix '/{k}_{epoch}e'
            pickle.dump(result_dict, F)

    return recall_x_iou[0,-1]
def evaluate_new(dataset_list, predictions_list, nms_thresh, result_output_dir,
                 recall_metrics=(1,5), iou_metrics=(0.1,0.3,0.5,0.7)):
    """Compute de-biased recall by contrasting original vs. replaced datasets.

    Args:
        dataset_list: (dataset_original, dataset_replaced) pair; the replaced
            dataset shares indices/durations with the original.
        predictions_list: (predictions_original, predictions_replaced), each a
            list[(moments_norm tensor(num_predictions, 2),
                  scores tensor(num_predictions))].
        nms_thresh (float): NMS IoU threshold for candidate suppression.
        result_output_dir (str or falsy): if truthy, per-sample IoUs are
            pickled to ``<dir>test_results.pkl``.
        recall_metrics (tuple): K values for Rank@K (collapsed to (1,) when
            only one prediction per sample is available).
        iou_metrics (tuple): IoU thresholds.

    Note:
        Unlike the sibling ``evaluate``, this function logs its tables and
        optionally saves results but does not return a value.
    """
    dataset_original, dataset_replaced = dataset_list
    predictions_original, predictions_replaced = predictions_list
    dataset_name = dataset_original.__class__.__name__
    logger = logging.getLogger("vmr.inference")
    logger.info("Performing {} evaluation (Size: {}).".format(dataset_name, len(dataset_original)))
    if result_output_dir:
        result_dict = {'vid': [], 'sentence': [], 'iou_o': [], 'iou_rp': []}
    else:
        result_dict = None
    # With a single candidate per sample only Rank@1 is meaningful.
    if predictions_original[0][0].shape[0]==1:
        recall_metrics=(1,)
    num_recall_metrics, num_iou_metrics = len(recall_metrics), len(iou_metrics)
    recall_metrics = torch.tensor(recall_metrics)
    iou_metrics = torch.tensor(iou_metrics)
    unbaised_recall_x_iou = torch.zeros(num_recall_metrics, num_iou_metrics)
    recall_x_miou = torch.zeros(num_recall_metrics)
    miss_x_iou_bias = torch.zeros(num_recall_metrics, num_iou_metrics)  # number of samples failed to recall in replaced datasets
    for idx in tqdm(range(len(predictions_original))):
        duration = dataset_original.get_duration(idx)
        gt_moment = dataset_original.get_moment(idx)

        # Rank the original-dataset candidates via NMS, then score them
        # against the ground-truth moment.
        result_original = predictions_original[idx]
        candidates_original, scores_original = result_original[0]*duration, result_original[1]
        predicted_moments_original = nms(candidates_original, scores_original, topk=recall_metrics[-1], thresh=nms_thresh)
        predicted_ious_original = iou(predicted_moments_original[:max(recall_metrics)], gt_moment)

        # Same pipeline for the replaced (counterfactual) dataset.
        result_replaced = predictions_replaced[idx]
        candidates_replaced, scores_replaced = result_replaced[0]*duration, result_replaced[1]
        predicted_moments_replaced = nms(candidates_replaced, scores_replaced, topk=recall_metrics[-1], thresh=nms_thresh)
        predicted_ious_replaced = iou(predicted_moments_replaced[:max(recall_metrics)], gt_moment)

        for i, r in enumerate(recall_metrics):
            ious_o = predicted_ious_original[:r]  # [r/?]
            ious_rp = predicted_ious_replaced[:r]  # [r/?]
            recall_x_miou[i] += ious_o.mean()
            # Zero-pad to exactly r entries so the expands below line up.
            ious_o = iou_padding(ious_o, r)
            ious_rp = iou_padding(ious_rp, r)
            bools_o = ious_o[:,None].expand(r, num_iou_metrics) > iou_metrics  # [r, num_iou_metrics]
            bools_rp = ious_rp[:,None].expand(r, num_iou_metrics) < iou_metrics  # [r, num_iou_metrics]
            # Count a "de-biased" hit only when the original recalls the
            # moment AND the replaced input fails at every rank.
            unbaised_recall_x_iou[i] += bools_rp.all(dim=0)*bools_o.any(dim=0)  # [num_iou_metrics]
            miss_x_iou_bias[i] += bools_rp.any(dim=0)  # [num_iou_metrics]
            # NOTE(review): this tests i==1 (the enumerate index, i.e. the
            # second recall metric), whereas the sibling `evaluate` tests
            # r==1. With recall_metrics=(1,5) this stores top-5 IoUs once per
            # sample, which matches the [:5] slices below — but confirm the
            # index/value distinction is intended.
            if i==1 and result_dict:
                result_dict['vid'].append(dataset_original.get_vid(idx))
                result_dict['sentence'].append(dataset_original.get_sentence(idx))
                result_dict['iou_o'].append(ious_o[:5])
                result_dict['iou_rp'].append(ious_rp[:5])
    # "hard" variant normalizes by the number of replaced-input misses; the
    # plain variant normalizes by the dataset size. The RHS must be computed
    # before unbaised_recall_x_iou is rebound, hence the tuple assignment.
    hard_unbaised_recall_x_iou, unbaised_recall_x_iou = unbaised_recall_x_iou/miss_x_iou_bias, unbaised_recall_x_iou/len(predictions_original)
    recall_x_miou /= len(predictions_original)

    # Print result in table
    table = [['hard_UBRank@{},IoU@{:.1f}'.format(i,j) \
        for i in recall_metrics for j in iou_metrics]]
    table.append(['{:.02f}'.format(hard_unbaised_recall_x_iou[i][j]*100) \
        for i in range(num_recall_metrics) for j in range(num_iou_metrics)])
    table = AsciiTable(table)
    for i in range(num_recall_metrics*num_iou_metrics):
        table.justify_columns[i] = 'center'
    logger.info('\n' + table.table)

    table = [['UBRank@{},IoU@{:.1f}'.format(i,j) \
        for i in recall_metrics for j in iou_metrics]]
    table.append(['{:.02f}'.format(unbaised_recall_x_iou[i][j]*100) \
        for i in range(num_recall_metrics) for j in range(num_iou_metrics)])
    table = AsciiTable(table)
    for i in range(num_recall_metrics*num_iou_metrics):
        table.justify_columns[i] = 'center'
    logger.info('\n' + table.table)

    if result_output_dir:
        with open(result_output_dir + 'test_results.pkl', 'wb') as F:  # DO NOT use join for prefix '/{k}_{epoch}e'
            pickle.dump(result_dict, F)
def reprint_result(self):
    """Render ``self.result`` as an ASCII table and print it again."""
    rendered = AsciiTable(self.result)
    print(rendered.table)
def build_data_structure(predicted_postag, predicted_chunk, stat):
    """Load the CoNLL-2000 chunking corpus and optionally substitute tags.

    Args:
        predicted_postag: per-sentence POS-tag sequences to substitute into
            the training sentences, or None to keep the gold tags.
        predicted_chunk: per-sentence chunk-tag sequences to substitute into
            the training sentences, or None to keep the gold tags.
        stat: when truthy, print corpus statistics as an ASCII table.

    Returns:
        (train_sents, test_sents) where each sentence is a list of
        (token, pos_tag, chunk_tag) triples. If predicted_postag is given it
        takes precedence over predicted_chunk.
    """
    if stat:  # idiomatic truthiness instead of `== True`
        print("Loading", nltk.corpus.conll2000.fileids()[0], ", and,",
              nltk.corpus.conll2000.fileids()[1])

    train_sents = list(nltk.corpus.conll2000.iob_sents('train.txt'))
    test_sents = list(nltk.corpus.conll2000.iob_sents('test.txt'))

    # Flatten the (token, POS, chunk) triples into parallel column lists.
    train_token_list = []
    train_POS_tag_list = []
    train_chunk_tag_list = []
    test_token_list = []
    test_POS_tag_list = []
    test_chunk_tag_list = []

    for sentence in train_sents:
        for triple in sentence:
            train_token_list.append(triple[0])
            train_POS_tag_list.append(triple[1])
            train_chunk_tag_list.append(triple[2])

    for sentence in test_sents:
        for triple in sentence:
            test_token_list.append(triple[0])
            test_POS_tag_list.append(triple[1])
            test_chunk_tag_list.append(triple[2])

    train_token_frequency = Counter(train_token_list)
    train_POS_tag_frequency = Counter(train_POS_tag_list)
    train_NPS_tag_frequency = Counter(train_chunk_tag_list)
    test_token_frequency = Counter(test_token_list)
    test_POS_tag_frequency = Counter(test_POS_tag_list)
    test_NPS_tag_frequency = Counter(test_chunk_tag_list)

    # Corpus statistics table (only printed when stat is truthy).
    table_data = []
    table_data.append(['Data Statistics', 'Training Data', 'Test Data'])
    table_data.append(
        ['Number of sentences', str(len(train_sents)), str(len(test_sents))])
    table_data.append([
        'Number of tokens',
        str(len(train_token_list)),
        str(len(test_token_list))
    ])
    table_data.append([
        'POS Tag count',
        str(len(train_POS_tag_frequency.keys())),
        str(len(test_POS_tag_frequency.keys()))
    ])
    table_data.append([
        'Chunk Tag count',
        str(len(train_NPS_tag_frequency.keys())),
        str(len(test_NPS_tag_frequency.keys()))
    ])
    table_data.append([
        'Vocabulary Size',
        str(len(train_token_frequency.keys())),
        str(len(test_token_frequency.keys()))
    ])
    table = AsciiTable(table_data)
    if stat:
        print(table.table)

    # `is not None` instead of `!= None` — identity comparison per PEP 8.
    if predicted_postag is not None:
        pos_replaced_train_sents = []
        for sents, postag_seq in zip(train_sents, predicted_postag):
            pos_replaced_sents = []
            for triple, postag in zip(sents, postag_seq):
                triple = list(triple)
                triple[1] = postag  # swap in the predicted POS tag
                triple = tuple(triple)
                pos_replaced_sents.append(triple)
            pos_replaced_train_sents.append(pos_replaced_sents)
        return pos_replaced_train_sents, test_sents

    if predicted_chunk is not None:
        chunk_replaced_train_sents = []
        for sents, chunk_seq in zip(train_sents, predicted_chunk):
            chunk_replaced_sents = []
            for triple, chunk in zip(sents, chunk_seq):
                triple = list(triple)
                triple[2] = chunk  # swap in the predicted chunk tag
                triple = tuple(triple)
                chunk_replaced_sents.append(triple)
            chunk_replaced_train_sents.append(chunk_replaced_sents)
        return chunk_replaced_train_sents, test_sents

    # Both substitution arguments were None: return the gold corpus.
    # (The original guarded this with `== None and == None`, which is
    # always true once the two branches above have not returned.)
    return train_sents, test_sents
def run():
    """Entry point: parse CLI args, build model/dataloaders, train and evaluate a YOLO model."""
    print_environment_info()
    parser = argparse.ArgumentParser(description="Trains the YOLO model.")
    parser.add_argument("-m", "--model", type=str, default="config/yolov3.cfg", help="Path to model definition file (.cfg)")
    parser.add_argument("-d", "--data", type=str, default="config/coco.data", help="Path to data config file (.data)")
    parser.add_argument("-e", "--epochs", type=int, default=300, help="Number of epochs")
    parser.add_argument("-v", "--verbose", action='store_true', help="Makes the training more verbose")
    parser.add_argument("--n_cpu", type=int, default=8, help="Number of cpu threads to use during batch generation")
    parser.add_argument("--pretrained_weights", type=str, help="Path to checkpoint file (.weights or .pth). Starts training from checkpoint model")
    parser.add_argument("--checkpoint_interval", type=int, default=1, help="Interval of epochs between saving model weights")
    parser.add_argument("--evaluation_interval", type=int, default=1, help="Interval of epochs between evaluations on validation set")
    # NOTE(review): action="store_false" means multiscale training defaults
    # to True and PASSING the flag turns it OFF, despite the help text
    # reading "Allow for..." — confirm this inversion is intended.
    parser.add_argument("--multiscale_training", action="store_false", help="Allow for multi-scale training")
    parser.add_argument("--iou_thres", type=float, default=0.5, help="Evaluation: IOU threshold required to qualify as detected")
    parser.add_argument("--conf_thres", type=float, default=0.1, help="Evaluation: Object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.5, help="Evaluation: IOU threshold for non-maximum suppression")
    parser.add_argument("--logdir", type=str, default="logs", help="Directory for training log files (e.g. for TensorBoard)")
    args = parser.parse_args()
    print("Command line arguments: {}".format(args))

    logger = Logger(args.logdir)  # Tensorboard logger

    # Create output directories if missing
    os.makedirs("output", exist_ok=True)
    os.makedirs("checkpoints", exist_ok=True)

    # Get data configuration
    data_config = parse_data_config(args.data)
    train_path = data_config["train"]
    valid_path = data_config["valid"]
    class_names = load_classes(data_config["names"])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # ############
    # Create model
    # ############

    model = load_model(args.model, args.pretrained_weights)

    # Print model
    if args.verbose:
        summary(model, input_size=(3, model.hyperparams['height'], model.hyperparams['height']))

    # Effective per-step batch: the cfg batch size is split into
    # `subdivisions` gradient-accumulation steps.
    mini_batch_size = model.hyperparams['batch'] // model.hyperparams['subdivisions']

    # #################
    # Create Dataloader
    # #################

    # Load training dataloader
    dataloader = _create_data_loader(
        train_path,
        mini_batch_size,
        model.hyperparams['height'],
        args.n_cpu,
        args.multiscale_training)

    # Load validation dataloader
    validation_dataloader = _create_validation_data_loader(
        valid_path,
        mini_batch_size,
        model.hyperparams['height'],
        args.n_cpu)

    # ################
    # Create optimizer
    # ################

    params = [p for p in model.parameters() if p.requires_grad]

    if (model.hyperparams['optimizer'] in [None, "adam"]):
        optimizer = optim.Adam(
            params,
            lr=model.hyperparams['learning_rate'],
            weight_decay=model.hyperparams['decay'],
        )
    elif (model.hyperparams['optimizer'] == "sgd"):
        optimizer = optim.SGD(
            params,
            lr=model.hyperparams['learning_rate'],
            weight_decay=model.hyperparams['decay'],
            momentum=model.hyperparams['momentum'])
    else:
        # NOTE(review): this branch only prints and falls through, leaving
        # `optimizer` unbound — the training loop below will raise a
        # NameError on the first optimizer step. Consider raising/exiting.
        print("Unknown optimizer. Please choose between (adam, sgd).")

    for epoch in range(args.epochs):

        print("\n---- Training Model ----")

        model.train()  # Set model to training mode

        for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Training Epoch {}".format(epoch))):
            batches_done = len(dataloader) * epoch + batch_i

            imgs = imgs.to(device, non_blocking=True)
            targets = targets.to(device)

            outputs = model(imgs)

            loss, loss_components = compute_loss(outputs, targets, model)

            loss.backward()

            ###############
            # Run optimizer
            ###############

            # Gradients are accumulated over `subdivisions` batches before a
            # single optimizer step is applied.
            if batches_done % model.hyperparams['subdivisions'] == 0:
                # Adapt learning rate
                # Get learning rate defined in cfg
                lr = model.hyperparams['learning_rate']
                if batches_done < model.hyperparams['burn_in']:
                    # Burn in: linear warm-up over the first burn_in batches
                    lr *= (batches_done / model.hyperparams['burn_in'])
                else:
                    # Set and parse the learning rate to the steps defined in the cfg
                    for threshold, value in model.hyperparams['lr_steps']:
                        if batches_done > threshold:
                            lr *= value
                # Log the learning rate
                logger.scalar_summary("train/learning_rate", lr, batches_done)
                # Set learning rate
                for g in optimizer.param_groups:
                    g['lr'] = lr

                # Run optimizer
                optimizer.step()
                # Reset gradients
                optimizer.zero_grad()

            # ############
            # Log progress
            # ############
            if args.verbose:
                print(AsciiTable(
                    [
                        ["Type", "Value"],
                        ["IoU loss", float(loss_components[0])],
                        ["Object loss", float(loss_components[1])],
                        ["Class loss", float(loss_components[2])],
                        ["Loss", float(loss_components[3])],
                        ["Batch loss", to_cpu(loss).item()],
                    ]).table)

            # Tensorboard logging
            tensorboard_log = [
                ("train/iou_loss", float(loss_components[0])),
                ("train/obj_loss", float(loss_components[1])),
                ("train/class_loss", float(loss_components[2])),
                ("train/loss", to_cpu(loss).item())]
            logger.list_of_scalars_summary(tensorboard_log, batches_done)

            model.seen += imgs.size(0)

        # #############
        # Save progress
        # #############

        # Save model to checkpoint file
        if epoch % args.checkpoint_interval == 0:
            checkpoint_path = "checkpoints/yolov3_ckpt_{}.pth".format(epoch)
            print("---- Saving checkpoint to: '{}' ----".format(checkpoint_path))
            torch.save(model.state_dict(), checkpoint_path)

        # ########
        # Evaluate
        # ########

        if epoch % args.evaluation_interval == 0:
            print("\n---- Evaluating Model ----")
            # Evaluate the model on the validation set
            metrics_output = _evaluate(
                model,
                validation_dataloader,
                class_names,
                img_size=model.hyperparams['height'],
                iou_thres=args.iou_thres,
                conf_thres=args.conf_thres,
                nms_thres=args.nms_thres,
                verbose=args.verbose
            )

            if metrics_output is not None:
                precision, recall, AP, f1, ap_class = metrics_output
                evaluation_metrics = [
                    ("validation/precision", precision.mean()),
                    ("validation/recall", recall.mean()),
                    ("validation/mAP", AP.mean()),
                    ("validation/f1", f1.mean())]
                logger.list_of_scalars_summary(evaluation_metrics, epoch)
def train(model, optimizer, dataloader, epoch, opt, logger, visualizer=None):
    """Run one training epoch: forward/backward per batch, log per-layer metrics, and checkpoint.

    Args:
        model: YOLO model exposing yolo_layer52/26/13 with .metrics dicts.
        optimizer: torch optimizer for the model parameters.
        dataloader: yields (images, targets) batches.
        epoch: current epoch index (used for logging and checkpoint cadence).
        opt: options namespace (device, thresholds, intervals, vis flags...).
        logger: object with print_and_write() for the metric table.
        visualizer: optional plotting backend; skipped when None.
    """
    for i, (images, targets) in enumerate(dataloader):
        # targets: [idx, class_id, x, y, h, w] in yolo format
        # idx is used to associate the bounding boxes with its image

        # skip images without bounding boxes (mainly because coco has unlabelled images)
        if targets.size(0) == 0:
            continue

        batches_done = len(dataloader) * epoch + i

        if not opt.no_cuda:
            model = model.to(opt.device)
            images = Variable(images.to(opt.device))
            if targets is not None:
                targets = Variable(targets.to(opt.device), requires_grad=False)

        loss, detections = model.forward(images, targets)
        detections = non_max_suppression(detections.cpu(), opt.conf_thres, opt.nms_thres)
        loss.backward()

        # Step only every `gradient_accumulations` batches (or on the last
        # batch of the epoch) to emulate a larger effective batch size.
        if batches_done % opt.gradient_accumulations == 0 or i == len(
                dataloader) - 1:
            optimizer.step()
            optimizer.zero_grad()

        # logging
        metric_keys = model.yolo_layer52.metrics.keys()
        yolo_metrics = [
            model.yolo_layer52.metrics,
            model.yolo_layer26.metrics,
            model.yolo_layer13.metrics
        ]
        metric_table_data = [[
            'Metrics', 'YOLO Layer 0', 'YOLO Layer 1', 'YOLO Layer 2'
        ]]
        formats = {m: '%.6f' for m in metric_keys}
        for metric in metric_keys:
            row_metrics = [
                formats[metric] % ym.get(metric, 0) for ym in yolo_metrics
            ]
            metric_table_data += [[metric, *row_metrics]]
        metric_table_data += [[
            'total loss', '{:.6f}'.format(loss.item()), '', ''
        ]]

        # beautify log message
        metric_table = AsciiTable(
            metric_table_data,
            title='[Epoch {:d}/{:d}, Batch {:d}/{:d}]'.format(
                epoch, opt.num_epochs, i, len(dataloader)))
        metric_table.inner_footing_row_border = True
        logger.print_and_write('{}\n\n\n'.format(metric_table.table))

        if visualizer is not None and not opt.no_vis_preds:
            visualizer.plot_predictions(images.cpu(), detections, env='main')  # plot prediction
        if visualizer is not None and not opt.no_vis_gt:
            visualizer.plot_ground_truth(images.cpu(), targets.cpu(), env='main')  # plot ground truth

        metrics_to_vis = []
        # uncomment code below to plot the metrics of each YOLO layer
        # for j, ym in enumerate(yolo_metrics):
        #     for key, metric in ym.items():
        #         if key != 'grid_size':
        #             metrics_to_vis += [('{}_yolo_layer_{}'.format(key, j), metric)]
        metrics_to_vis += [('total_loss', loss.item())]
        if visualizer is not None:
            visualizer.plot_metrics(metrics_to_vis, batches_done, env='main')

    # save checkpoints
    if epoch % opt.checkpoint_interval == 0:
        save_file_path = os.path.join(opt.checkpoint_path,
                                      'epoch_{}.pth'.format(epoch))
        states = {
            'epoch': epoch + 1,
            'model': opt.model,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        torch.save(states, save_file_path)
def evaluate(self,
             results,
             metric='bbox',
             logger=None,
             jsonfile_prefix=None,
             classwise=False,
             proposal_nums=(100, 300, 1000),
             iou_thrs=None,
             metric_items=None):
    """Evaluation in COCO protocol.

    Args:
        results (list[list | tuple]): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated. Options are
            'bbox', 'segm', 'proposal', 'proposal_fast'.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.
        jsonfile_prefix (str | None): The prefix of json files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.
        classwise (bool): Whether to evaluating the AP for each class.
        proposal_nums (Sequence[int]): Proposal number used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (100, 300, 1000).
        iou_thrs (Sequence[float], optional): IoU threshold used for
            evaluating recalls/mAPs. If set to a list, the average of all
            IoUs will also be computed. If not specified, [0.50, 0.55, 0.60,
            0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
            Default: None.
        metric_items (list[str] | str, optional): Metric items that will
            be returned. If not specified, ``['AR@100', 'AR@300',
            'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
            used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
            'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
            ``metric=='bbox' or metric=='segm'``.

    Returns:
        dict[str, float]: COCO style evaluation metric.
    """

    metrics = metric if isinstance(metric, list) else [metric]
    allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
    for metric in metrics:
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
    if iou_thrs is None:
        # Standard COCO IoU sweep: 0.50:0.05:0.95 (10 thresholds).
        iou_thrs = np.linspace(
            .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
    if metric_items is not None:
        if not isinstance(metric_items, list):
            metric_items = [metric_items]

    # Convert results to COCO-format json files (tmp_dir is set when no
    # jsonfile_prefix was supplied and must be cleaned up at the end).
    result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

    eval_results = {}
    cocoGt = self.coco
    for metric in metrics:
        msg = f'Evaluating {metric}...'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        if metric == 'proposal_fast':
            # Fast recall path that bypasses COCOeval entirely.
            ar = self.fast_eval_recall(
                results, proposal_nums, iou_thrs, logger='silent')
            log_msg = []
            for i, num in enumerate(proposal_nums):
                eval_results[f'AR@{num}'] = ar[i]
                log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
            log_msg = ''.join(log_msg)
            print_log(log_msg, logger=logger)
            continue

        if metric not in result_files:
            raise KeyError(f'{metric} is not in results')
        try:
            cocoDt = cocoGt.loadRes(result_files[metric])
        except IndexError:
            print_log(
                'The testing results of the whole dataset is empty.',
                logger=logger,
                level=logging.ERROR)
            break

        iou_type = 'bbox' if metric == 'proposal' else metric
        cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
        cocoEval.params.catIds = self.cat_ids
        cocoEval.params.imgIds = self.img_ids
        cocoEval.params.maxDets = list(proposal_nums)
        cocoEval.params.iouThrs = iou_thrs
        # mapping of cocoEval.stats
        coco_metric_names = {
            'mAP': 0,
            'mAP_50': 1,
            'mAP_75': 2,
            'mAP_s': 3,
            'mAP_m': 4,
            'mAP_l': 5,
            'AR@100': 6,
            'AR@300': 7,
            'AR@1000': 8,
            'AR_s@1000': 9,
            'AR_m@1000': 10,
            'AR_l@1000': 11
        }
        if metric_items is not None:
            for metric_item in metric_items:
                if metric_item not in coco_metric_names:
                    raise KeyError(
                        f'metric item {metric_item} is not supported')

        if metric == 'proposal':
            # Class-agnostic evaluation for proposals.
            cocoEval.params.useCats = 0
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            if metric_items is None:
                metric_items = [
                    'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                    'AR_m@1000', 'AR_l@1000'
                ]

            for item in metric_items:
                val = float(
                    f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                eval_results[item] = val
        else:
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            if classwise:  # Compute per-category AP
                # Compute per-category AP
                # from https://github.com/facebookresearch/detectron2/
                precisions = cocoEval.eval['precision']
                # precision: (iou, recall, cls, area range, max dets)
                assert len(self.cat_ids) == precisions.shape[2]

                results_per_category = []
                for idx, catId in enumerate(self.cat_ids):
                    # area range index 0: all area ranges
                    # max dets index -1: typically 100 per image
                    nm = self.coco.loadCats(catId)[0]
                    precision = precisions[:, :, idx, 0, -1]
                    precision = precision[precision > -1]
                    if precision.size:
                        ap = np.mean(precision)
                    else:
                        ap = float('nan')
                    results_per_category.append(
                        (f'{nm["name"]}', f'{float(ap):0.3f}'))

                # Lay the (category, AP) pairs out over at most 3 column
                # pairs for a compact table.
                num_columns = min(6, len(results_per_category) * 2)
                results_flatten = list(
                    itertools.chain(*results_per_category))
                headers = ['category', 'AP'] * (num_columns // 2)
                results_2d = itertools.zip_longest(*[
                    results_flatten[i::num_columns]
                    for i in range(num_columns)
                ])
                table_data = [headers]
                table_data += [result for result in results_2d]
                table = AsciiTable(table_data)
                print_log('\n' + table.table, logger=logger)

            if metric_items is None:
                metric_items = [
                    'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                ]

            for metric_item in metric_items:
                key = f'{metric}_{metric_item}'
                val = float(
                    f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                )
                eval_results[key] = val
            ap = cocoEval.stats[:6]
            eval_results[f'{metric}_mAP_copypaste'] = (
                f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                f'{ap[4]:.3f} {ap[5]:.3f}')
    if tmp_dir is not None:
        tmp_dir.cleanup()
    return eval_results
def main():
    """Fuse detection scores from multiple result files and optionally evaluate temporal detection mAP."""
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    # Load every per-source result file listed on the command line.
    output_list = []
    for out in args.outputs:
        output_list.append(mmcv.load(out))

    # Per-source fusion weights: user-supplied (normalized to sum to 1)
    # or uniform.
    if args.score_weights:
        weights = np.array(args.score_weights) / sum(args.score_weights)
    else:
        weights = [1. / len(output_list) for _ in output_list]

    def merge_scores(idx):
        # Weighted sum of one score component across all sources; a None
        # component in the first source is taken to mean "absent everywhere".
        def merge_part(arrs, index, weights):
            if arrs[0][index] is not None:
                return np.sum([a[index] * w for a, w in zip(arrs, weights)],
                              axis=0)
            else:
                return None

        results = [output[idx] for output in output_list]
        # Proposals (component 0) are shared; take them from the first source.
        rel_props = output_list[0][idx][0]
        return (rel_props, merge_part(results, 1, weights),
                merge_part(results, 2, weights),
                merge_part(results, 3, weights))

    print('Merge detection scores from {} sources'.format(len(output_list)))
    outputs = [merge_scores(idx) for idx in range(len(dataset))]
    print('Merge finished')

    eval_type = args.eval
    if eval_type:
        print('Starting evaluate {}'.format(eval_type))

        detections = results2det(dataset, outputs,
                                 **cfg.test_cfg.ssn.evaluater)

        if not args.no_regression:
            print("Performing location regression")
            for cls in range(len(detections)):
                detections[cls] = {
                    k: perform_regression(v)
                    for k, v in detections[cls].items()
                }
            print("Regression finished")

        print("Performing NMS")
        for cls in range(len(detections)):
            detections[cls] = {
                k: temporal_nms(v, cfg.test_cfg.ssn.evaluater.nms)
                for k, v in detections[cls].items()
            }
        print("NMS finished")

        # NOTE(review): iou_range is unbound if eval_type is anything other
        # than 'activitynet' or 'thumos14' — confirm upstream arg validation
        # restricts --eval to these two values.
        if eval_type == 'activitynet':
            iou_range = np.arange(0.5, 1.0, 0.05)
        elif eval_type == 'thumos14':
            iou_range = np.arange(0.1, 1.0, .1)
        # iou_range = [0.5]

        # get gt
        all_gt = pd.DataFrame(dataset.get_all_gt(),
                              columns=['video-id', 'cls', 't-start', 't-end'])
        # NOTE(review): positional axis in .drop('cls', 1) is deprecated in
        # pandas >= 1.1 and removed in 2.0; prefer .drop(columns='cls').
        gt_by_cls = [
            all_gt[all_gt.cls == cls].reset_index(drop=True).drop('cls', 1)
            for cls in range(len(detections))
        ]

        plain_detections = [
            det2df(detections, cls) for cls in range(len(detections))
        ]
        ap_values = eval_ap_parallel(plain_detections, gt_by_cls, iou_range)
        # Mean AP over classes, one value per IoU threshold.
        map_iou = ap_values.mean(axis=0)
        print("Evaluation finished")

        # display
        display_title = 'Temporal detection performance ({})'.format(args.eval)
        display_data = [['IoU thresh'], ['mean AP']]
        for i in range(len(iou_range)):
            display_data[0].append('{:.02f}'.format(iou_range[i]))
            display_data[1].append('{:.04f}'.format(map_iou[i]))
        table = AsciiTable(display_data, display_title)
        table.justify_columns[-1] = 'right'
        table.inner_footing_row_border = True
        print(table.table)
def run_model(args, currentmodelrun, modelend, numbermodelruns, inputfile, usernamespace):
    """Runs a model - processes the input file; builds the Yee cells; calculates update coefficients; runs main FDTD loop.

    Args:
        args (Namespace): Namespace with command line arguments
        currentmodelrun (int): Current model run number.
        modelend (int): Number of last model to run.
        numbermodelruns (int): Total number of model runs.
        inputfile (object): File object for the input file.
        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.

    Returns:
        tsolve (int): Length of time (seconds) of main FDTD calculations
    """

    # Monitor memory usage
    p = psutil.Process()

    # Declare variable to hold FDTDGrid class.  The grid lives at module
    # scope so it can be reused across model runs when the geometry is fixed
    # (see the `del G` at the bottom).
    global G

    # Used for naming geometry and output files
    appendmodelnumber = '' if numbermodelruns == 1 and not args.task and not args.restart else str(currentmodelrun)

    # Normal model reading/building process; bypassed if geometry information
    # to be reused (i.e. G survived from a previous call).
    if 'G' not in globals():

        # Initialise an instance of the FDTDGrid class
        G = FDTDGrid()

        # Get information about host machine
        G.hostinfo = get_host_info()

        # Single GPU object
        if args.gpu:
            G.gpu = args.gpu

        G.inputfilename = os.path.split(inputfile.name)[1]
        G.inputdirectory = os.path.dirname(os.path.abspath(inputfile.name))
        inputfilestr = '\n--- Model {}/{}, input file: {}'.format(
            currentmodelrun, modelend, inputfile.name)
        print(Fore.GREEN + '{} {}\n'.format(
            inputfilestr, '-' *
            (get_terminal_width() - 1 - len(inputfilestr))) + Style.RESET_ALL)

        # Add the current model run to namespace that can be accessed by
        # user in any Python code blocks in input file
        usernamespace['current_model_run'] = currentmodelrun

        # Read input file and process any Python and include file commands
        processedlines = process_python_include_code(inputfile, usernamespace)

        # Print constants/variables in user-accessable namespace
        uservars = ''
        for key, value in sorted(usernamespace.items()):
            if key != '__builtins__':
                uservars += '{}: {}, '.format(key, value)
        # [:-2] strips the trailing ", " from the accumulated string.
        print(
            'Constants/variables used/available for Python scripting: {{{}}}\n'
            .format(uservars[:-2]))

        # Write a file containing the input commands after Python or include file commands have been processed
        if args.write_processed:
            write_processed_file(processedlines, appendmodelnumber, G)

        # Check validity of command names and that essential commands are present
        singlecmds, multicmds, geometry = check_cmd_names(processedlines)

        # Create built-in materials
        m = Material(0, 'pec')
        m.se = float('inf')
        m.type = 'builtin'
        m.averagable = False
        G.materials.append(m)
        m = Material(1, 'free_space')
        m.type = 'builtin'
        G.materials.append(m)

        # Process parameters for commands that can only occur once in the model
        process_singlecmds(singlecmds, G)

        # Process parameters for commands that can occur multiple times in the model
        print()
        process_multicmds(multicmds, G)

        # Initialise an array for volumetric material IDs (solid), boolean
        # arrays for specifying materials not to be averaged (rigid),
        # an array for cell edge IDs (ID)
        G.initialise_geometry_arrays()

        # Initialise arrays for the field components
        G.initialise_field_arrays()

        # Process geometry commands in the order they were given
        process_geometrycmds(geometry, G)

        # Build the PMLs and calculate initial coefficients
        print()
        if all(value == 0 for value in G.pmlthickness.values()):
            if G.messages:
                print('PML boundaries: switched off')
            pass  # If all the PMLs are switched off don't need to build anything
        else:
            if G.messages:
                # Uniform thickness gets a compact message; otherwise list
                # each boundary's thickness individually.
                if all(value == G.pmlthickness['x0']
                       for value in G.pmlthickness.values()):
                    pmlinfo = str(G.pmlthickness['x0']) + ' cells'
                else:
                    pmlinfo = ''
                    for key, value in G.pmlthickness.items():
                        pmlinfo += '{}: {} cells, '.format(key, value)
                    pmlinfo = pmlinfo[:-2]
                print('PML boundaries: {}'.format(pmlinfo))
            pbar = tqdm(total=sum(1 for value in G.pmlthickness.values()
                                  if value > 0),
                        desc='Building PML boundaries',
                        ncols=get_terminal_width() - 1,
                        file=sys.stdout,
                        disable=G.tqdmdisable)
            build_pmls(G, pbar)
            pbar.close()

        # Build the model, i.e. set the material properties (ID) for every edge
        # of every Yee cell
        print()
        pbar = tqdm(total=2,
                    desc='Building main grid',
                    ncols=get_terminal_width() - 1,
                    file=sys.stdout,
                    disable=G.tqdmdisable)
        build_electric_components(G.solid, G.rigidE, G.ID, G)
        pbar.update()
        build_magnetic_components(G.solid, G.rigidH, G.ID, G)
        pbar.update()
        pbar.close()

        # Process any voltage sources (that have resistance) to create a new
        # material at the source location
        for voltagesource in G.voltagesources:
            voltagesource.create_material(G)

        # Initialise arrays of update coefficients to pass to update functions
        G.initialise_std_update_coeff_arrays()

        # Initialise arrays of update coefficients and temporary values if
        # there are any dispersive materials
        if Material.maxpoles != 0:
            # Update estimated memory (RAM) usage
            memestimate = memory_usage(G)
            # Check if model can be built and/or run on host
            if memestimate > G.hostinfo['ram']:
                raise GeneralError(
                    'Estimated memory (RAM) required ~{} exceeds {} detected!\n'
                    .format(
                        human_size(memestimate),
                        human_size(G.hostinfo['ram'],
                                   a_kilobyte_is_1024_bytes=True)))
            # Check if model can be run on specified GPU if required
            if G.gpu is not None:
                if memestimate > G.gpu.totalmem:
                    raise GeneralError(
                        'Estimated memory (RAM) required ~{} exceeds {} detected on specified {} - {} GPU!\n'
                        .format(
                            human_size(memestimate),
                            human_size(G.gpu.totalmem,
                                       a_kilobyte_is_1024_bytes=True),
                            G.gpu.deviceID, G.gpu.name))
            if G.messages:
                print('Estimated memory (RAM) required: ~{}'.format(
                    human_size(memestimate)))
            G.initialise_dispersive_arrays()

        # Process complete list of materials - calculate update coefficients,
        # store in arrays, and build text list of materials/properties
        materialsdata = process_materials(G)
        if G.messages:
            print('\nMaterials:')
            materialstable = AsciiTable(materialsdata)
            materialstable.outer_border = False
            materialstable.justify_columns[0] = 'right'
            print(materialstable.table)

        # Check to see if numerical dispersion might be a problem
        results = dispersion_analysis(G)
        if not results['waveform']:
            print(
                Fore.RED +
                "\nWARNING: Numerical dispersion analysis not carried out as either no waveform detected or waveform does not fit within specified time window and is therefore being truncated."
                + Style.RESET_ALL)
        elif results['N'] < G.mingridsampling:
            raise GeneralError(
                "Non-physical wave propagation: Material '{}' has wavelength sampled by {} cells, less than required minimum for physical wave propagation. Maximum significant frequency estimated as {:g}Hz"
                .format(results['material'].ID, results['N'],
                        results['maxfreq']))
        elif results['deltavp'] and np.abs(
                results['deltavp']) > G.maxnumericaldisp:
            print(
                Fore.RED +
                "\nWARNING: Potentially significant numerical dispersion. Estimated largest physical phase-velocity error is {:.2f}% in material '{}' whose wavelength sampled by {} cells. Maximum significant frequency estimated as {:g}Hz"
                .format(results['deltavp'], results['material'].ID,
                        results['N'], results['maxfreq']) + Style.RESET_ALL)
        elif results['deltavp'] and G.messages:
            print(
                "\nNumerical dispersion analysis: estimated largest physical phase-velocity error is {:.2f}% in material '{}' whose wavelength sampled by {} cells. Maximum significant frequency estimated as {:g}Hz"
                .format(results['deltavp'], results['material'].ID,
                        results['N'], results['maxfreq']))

    # If geometry information to be reused between model runs
    else:
        inputfilestr = '\n--- Model {}/{}, input file (not re-processed, i.e. geometry fixed): {}'.format(
            currentmodelrun, modelend, inputfile.name)
        print(Fore.GREEN + '{} {}\n'.format(
            inputfilestr, '-' *
            (get_terminal_width() - 1 - len(inputfilestr))) + Style.RESET_ALL)

        # Clear arrays for field components
        G.initialise_field_arrays()

        # Clear arrays for fields in PML
        for pml in G.pmls:
            pml.initialise_field_arrays()

    # Adjust position of simple sources and receivers if required.
    # The bounds check is only done on the first run, projecting the final
    # (modelend) position to verify every stepped position stays in-domain.
    if G.srcsteps[0] != 0 or G.srcsteps[1] != 0 or G.srcsteps[2] != 0:
        for source in itertools.chain(G.hertziandipoles, G.magneticdipoles):
            if currentmodelrun == 1:
                if source.xcoord + G.srcsteps[
                        0] * modelend < 0 or source.xcoord + G.srcsteps[
                            0] * modelend > G.nx or source.ycoord + G.srcsteps[
                                1] * modelend < 0 or source.ycoord + G.srcsteps[
                                    1] * modelend > G.ny or source.zcoord + G.srcsteps[
                                        2] * modelend < 0 or source.zcoord + G.srcsteps[
                                            2] * modelend > G.nz:
                    raise GeneralError(
                        'Source(s) will be stepped to a position outside the domain.'
                    )
            source.xcoord = source.xcoordorigin + (currentmodelrun -
                                                   1) * G.srcsteps[0]
            source.ycoord = source.ycoordorigin + (currentmodelrun -
                                                   1) * G.srcsteps[1]
            source.zcoord = source.zcoordorigin + (currentmodelrun -
                                                   1) * G.srcsteps[2]
    if G.rxsteps[0] != 0 or G.rxsteps[1] != 0 or G.rxsteps[2] != 0:
        for receiver in G.rxs:
            if currentmodelrun == 1:
                if receiver.xcoord + G.rxsteps[
                        0] * modelend < 0 or receiver.xcoord + G.rxsteps[
                            0] * modelend > G.nx or receiver.ycoord + G.rxsteps[
                                1] * modelend < 0 or receiver.ycoord + G.rxsteps[
                                    1] * modelend > G.ny or receiver.zcoord + G.rxsteps[
                                        2] * modelend < 0 or receiver.zcoord + G.rxsteps[
                                            2] * modelend > G.nz:
                    raise GeneralError(
                        'Receiver(s) will be stepped to a position outside the domain.'
                    )
            receiver.xcoord = receiver.xcoordorigin + (currentmodelrun -
                                                       1) * G.rxsteps[0]
            receiver.ycoord = receiver.ycoordorigin + (currentmodelrun -
                                                       1) * G.rxsteps[1]
            receiver.zcoord = receiver.zcoordorigin + (currentmodelrun -
                                                       1) * G.rxsteps[2]

    # Write files for any geometry views and geometry object outputs
    if not (G.geometryviews or G.geometryobjectswrite) and args.geometry_only:
        print(
            Fore.RED +
            '\nWARNING: No geometry views or geometry objects to output found.'
            + Style.RESET_ALL)
    if G.geometryviews:
        print()
        for i, geometryview in enumerate(G.geometryviews):
            geometryview.set_filename(appendmodelnumber, G)
            pbar = tqdm(total=geometryview.datawritesize,
                        unit='byte',
                        unit_scale=True,
                        desc='Writing geometry view file {}/{}, {}'.format(
                            i + 1, len(G.geometryviews),
                            os.path.split(geometryview.filename)[1]),
                        ncols=get_terminal_width() - 1,
                        file=sys.stdout,
                        disable=G.tqdmdisable)
            geometryview.write_vtk(G, pbar)
            pbar.close()
    if G.geometryobjectswrite:
        for i, geometryobject in enumerate(G.geometryobjectswrite):
            pbar = tqdm(total=geometryobject.datawritesize,
                        unit='byte',
                        unit_scale=True,
                        desc='Writing geometry object file {}/{}, {}'.format(
                            i + 1, len(G.geometryobjectswrite),
                            os.path.split(geometryobject.filename)[1]),
                        ncols=get_terminal_width() - 1,
                        file=sys.stdout,
                        disable=G.tqdmdisable)
            geometryobject.write_hdf5(G, pbar)
            pbar.close()

    # If only writing geometry information
    if args.geometry_only:
        tsolve = 0

    # Run simulation
    else:
        # Prepare any snapshot files
        for snapshot in G.snapshots:
            snapshot.prepare_vtk_imagedata(appendmodelnumber, G)

        # Output filename
        inputfileparts = os.path.splitext(
            os.path.join(G.inputdirectory, G.inputfilename))
        outputfile = inputfileparts[0] + appendmodelnumber + '.out'
        print('\nOutput file: {}\n'.format(outputfile))

        # Main FDTD solving functions for either CPU or GPU
        if G.gpu is None:
            tsolve = solve_cpu(currentmodelrun, modelend, G)
        else:
            tsolve = solve_gpu(currentmodelrun, modelend, G)

        # Write an output file in HDF5 format
        write_hdf5_outputfile(outputfile, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz,
                              G)

        if G.messages:
            print('Memory (RAM) used: ~{}'.format(
                human_size(p.memory_info().rss)))
            print('Solving time [HH:MM:SS]: {}'.format(
                datetime.timedelta(seconds=tsolve)))

    # If geometry information to be reused between model runs then FDTDGrid
    # class instance must be global so that it persists; otherwise delete it
    # so the next call rebuilds the grid from scratch.
    if not args.geometry_fixed:
        del G

    return tsolve
def collect(self) -> 'Assignment':
    """Collect an assignment. **Commits Instructors** **Pushes Instructors**

    Copy your students' notebooks from the fileserver into the instructors
    repo `submitted/` directory. This also creates a submission in the
    gradebook on behalf of each student. If this is not done, then
    autograding doesn't record grades in the gradebook.

    :return: The assignment object to allow for method chaining.
    :rtype: Assignment
    """

    print(utils.banner(f"Collecting {self.name}"))

    try:
        student_ids = map(lambda stu: stu.get('id'), self.course.students)
    except Exception:
        sys.exit(
            "No students found. Please run `course.get_students_from_canvas()` before collecting an assignment."
        )

    # If we're using a ZFS fileserver, we need to look for the relevant snapshots
    if self.course.zfs:
        # List all of the snapshots available and parse their dates.
        # BUG FIX: the original passed '.zfs' 'snapshot' (implicit string
        # concatenation -> '.zfssnapshot'); the snapshot directory is
        # '.zfs/snapshot' (see zfs_path below), so join the two components.
        snapshot_names = os.listdir(
            os.path.join(self.course.storage_path, '.zfs', 'snapshot'))
        snapshot_name = self._find_closest_snapshot(
            snapshot_names,
            snapshot_regex=self.course.zfs_regex,
            datetime_pattern=self.course.zfs_datetime_pattern)
        zfs_path = os.path.join('.zfs', 'snapshot', snapshot_name)

    assignment_collection_header = [['Student ID', 'Collection Status']]
    assignment_collection_status = []

    # get the assignment path for each student ID in Canvas
    for student_id in student_ids:
        student_path = os.path.join(self.course.storage_path, student_id,
                                    self.course.stu_repo_name,
                                    self.course.assignment_release_path)

        # If zfs, use the student + zfs + assignment name path
        # This works because our ZFS snapshots are recursive.
        if self.course.zfs and os.path.exists(
                os.path.join(student_path,
                             '.zfs')):  # Check that we're using ZFS
            assignment_path = os.path.join(student_path, zfs_path, self.name)
        # otherwise just use the student's work directly
        else:
            assignment_path = os.path.join(student_path, self.name)

        submission_path = os.path.join(self.course.working_directory,
                                       'submitted', student_id, self.name)
        # then copy the work into the submitted directory + student_id + assignment_name

        # Since we're JUST copying the current assignment, we can safely
        # overwrite it. ignore_errors=True preserves the original best-effort
        # behaviour (it doesn't matter if the directory doesn't exist) without
        # the bare `except:` that silently swallowed every error.
        shutil.rmtree(submission_path, ignore_errors=True)

        try:
            shutil.copytree(assignment_path, submission_path)
        # if no assignment for that student, fail
        #* NOTE: could also be due to incorrect directory structure.
        except FileNotFoundError:
            assignment_collected = False
            assignment_collection_status.append(
                [student_id, f'{utils.color.RED}failure{utils.color.END}'])
        else:
            assignment_collected = True
            assignment_collection_status.append([
                student_id, f'{utils.color.GREEN}success{utils.color.END}'
            ])

        # **CRUCIALLY IMPORTANT**
        # If the assignment was successfully collected, create a submission in
        # gradebook for the student. If this is never done, then autograding
        # doesn't record grades in the gradebook.
        if assignment_collected:
            try:
                self.course.nb_api.gradebook.add_submission(
                    self.name, student_id)
                self.course.nb_api.gradebook.close()
            except Exception as e:
                # Always release the gradebook before propagating the error.
                self.course.nb_api.gradebook.close()
                raise e

    table = AsciiTable(assignment_collection_header +
                       assignment_collection_status)
    table.title = 'Assignment Collection'
    print(table.table)

    # Do not exit if either of these operations fails. We wish to be able to
    # finish autograding even if we can't commit/push.
    try:
        utils.commit_repo(self.course.working_directory,
                          f'Collected {self.name}')
    except Exception as e:
        print('\n')
        print('Error committing to your instructors repository:')
        print(e)
    else:
        print('\n')

    try:
        utils.push_repo(self.course.working_directory)
    except Exception as e:
        print('Error pushing your repository:')
        print(e)

    return self
def coco_eval(result_files,
              result_types,
              coco,
              max_dets=(100, 300, 1000),
              classwise=True):
    """Run COCO-style evaluation on detection/segmentation results.

    Args:
        result_files (str | dict): Path to a result json, or a mapping from
            result type to json path.
        result_types (list[str]): Any of 'proposal', 'proposal_fast', 'bbox',
            'segm', 'keypoints'.
        coco (COCO | str): COCO API object or path to the annotation file.
        max_dets (tuple[int]): Proposal numbers used for recall/AR.
        classwise (bool): Additionally print a per-category AP table.
    """
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
        ]

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    # Fast recall path computes AR directly without COCOeval.
    if result_types == ['proposal_fast']:
        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return

    for res_type in result_types:
        if isinstance(result_files, str):
            result_file = result_files
        elif isinstance(result_files, dict):
            result_file = result_files[res_type]
        else:
            # BUG FIX: was `assert TypeError(...)` — an exception *instance*
            # is truthy, so the assert always passed and the type error was
            # never reported. Raise it instead.
            raise TypeError('result_files must be a str or dict')
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # Proposal evaluation reuses the bbox IoU machinery, class-agnostic.
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        if classwise:
            # Compute per-category AP
            # from https://github.com/facebookresearch/detectron2/blob/03064eb5bafe4a3e5750cc7a16672daf5afe8435/detectron2/evaluation/coco_evaluation.py#L259-L283  # noqa
            precisions = cocoEval.eval['precision']
            catIds = coco.getCatIds()
            # precision has dims (iou, recall, cls, area range, max dets)
            assert len(catIds) == precisions.shape[2]

            results_per_category = []
            for idx, catId in enumerate(catIds):
                # area range index 0: all area ranges
                # max dets index -1: typically 100 per image
                nm = coco.loadCats(catId)[0]
                precision = precisions[:, :, idx, 0, -1]
                # -1 marks absent entries; keep only valid precision values.
                precision = precision[precision > -1]
                ap = np.mean(precision) if precision.size else float('nan')
                results_per_category.append(
                    ('{}'.format(nm['name']),
                     '{:0.3f}'.format(float(ap * 100))))

            # Lay the (name, AP) pairs out in up to 3 column-pairs.
            N_COLS = min(6, len(results_per_category) * 2)
            results_flatten = list(itertools.chain(*results_per_category))
            headers = ['category', 'AP'] * (N_COLS // 2)
            results_2d = itertools.zip_longest(
                *[results_flatten[i::N_COLS] for i in range(N_COLS)])
            table_data = [headers]
            table_data += [result for result in results_2d]
            table = AsciiTable(table_data)
            print(table.table)
def evaluate(self,
             results,
             metric='mIoU',
             logger=None,
             efficient_test=False,
             **kwargs):
    """Evaluate the dataset.

    Args:
        results (list): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated. 'mIoU' and
            'mDice' are supported.
        logger (logging.Logger | None | str): Logger used for printing
            related information during evaluation. Default: None.
        efficient_test (bool): Forwarded to ``get_gt_seg_maps``; when the
            results are file paths they are removed after evaluation.

    Returns:
        dict[str, float]: Default metrics.
    """

    if isinstance(metric, str):
        metric = [metric]
    allowed_metrics = ['mIoU', 'mDice']
    if not set(metric).issubset(set(allowed_metrics)):
        raise KeyError('metric {} is not supported'.format(metric))
    eval_results = {}
    gt_seg_maps = self.get_gt_seg_maps(efficient_test)
    # Without a declared class list, infer the class count from the union of
    # labels actually present in the ground-truth maps.
    if self.CLASSES is None:
        num_classes = len(
            reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
    else:
        num_classes = len(self.CLASSES)
    ret_metrics = eval_metrics(
        results,
        gt_seg_maps,
        num_classes,
        self.ignore_index,
        metric,
        label_map=self.label_map,
        reduce_zero_label=self.reduce_zero_label)
    # Header: metric names with the leading 'm' stripped (mIoU -> IoU).
    class_table_data = [['Class'] + [m[1:] for m in metric] + ['Acc']]
    if self.CLASSES is None:
        class_names = tuple(range(num_classes))
    else:
        class_names = self.CLASSES
    ret_metrics_round = [
        np.round(ret_metric * 100, 2) for ret_metric in ret_metrics
    ]
    # NOTE(review): the index arithmetic below assumes eval_metrics returns
    # [all_acc, acc, <one array per requested metric>...] — [0] global acc,
    # [1] per-class acc, [2:] per-metric values. Confirm against eval_metrics.
    for i in range(num_classes):
        class_table_data.append([class_names[i]] +
                                [m[i] for m in ret_metrics_round[2:]] +
                                [ret_metrics_round[1][i]])
    summary_table_data = [['Scope'] +
                          ['m' + head
                           for head in class_table_data[0][1:]] + ['aAcc']]
    # nanmean skips classes absent from the ground truth.
    ret_metrics_mean = [
        np.round(np.nanmean(ret_metric) * 100, 2)
        for ret_metric in ret_metrics
    ]
    summary_table_data.append(['global'] + ret_metrics_mean[2:] +
                              [ret_metrics_mean[1]] + [ret_metrics_mean[0]])
    print_log('per class results:', logger)
    table = AsciiTable(class_table_data)
    print_log('\n' + table.table, logger=logger)
    print_log('Summary:', logger)
    table = AsciiTable(summary_table_data)
    print_log('\n' + table.table, logger=logger)

    # Convert the percentage summary row back to [0, 1] fractions keyed by
    # the summary header names (mIoU, aAcc, ...).
    for i in range(1, len(summary_table_data[0])):
        eval_results[summary_table_data[0]
                     [i]] = summary_table_data[1][i] / 100.0
    # efficient_test mode stores per-image results as files; clean them up.
    if mmcv.is_list_of(results, str):
        for file_name in results:
            os.remove(file_name)
    return eval_results
def print_map_summary(mean_ap,
                      results,
                      dataset=None,
                      scale_ranges=None,
                      logger=None):
    """Print mAP and results of each class.

    A table will be printed to show the gts/dets/recall/AP of each class and
    the mAP.

    Args:
        mean_ap (float): Calculated from `eval_map()`.
        results (list[dict]): Calculated from `eval_map()`.
        dataset (list[str] | str | None): Dataset name or dataset classes.
        scale_ranges (list[tuple] | None): Range of scales to be evaluated.
        logger (logging.Logger | str | None): The way to print the mAP
            summary. See `mmcv.utils.print_log()` for details. Default: None.
    """
    if logger == 'silent':
        return

    first_ap = results[0]['ap']
    num_scales = len(first_ap) if isinstance(first_ap, np.ndarray) else 1

    if scale_ranges is not None:
        assert len(scale_ranges) == num_scales

    num_classes = len(results)
    shape = (num_scales, num_classes)
    recalls = np.zeros(shape, dtype=np.float32)
    aps = np.zeros(shape, dtype=np.float32)
    num_gts = np.zeros(shape, dtype=int)

    # Gather the last-threshold recall, the AP, and the gt count per class.
    for cls, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            recalls[:, cls] = np.array(cls_result['recall'], ndmin=2)[:, -1]
        aps[:, cls] = cls_result['ap']
        num_gts[:, cls] = cls_result['num_gts']

    if dataset is None:
        label_names = [str(i) for i in range(num_classes)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]

    header = ['class', 'gts', 'dets', 'recall', 'ap']
    for scale in range(num_scales):
        if scale_ranges is not None:
            print_log(f'Scale range {scale_ranges[scale]}', logger=logger)
        rows = [[
            label_names[cls], num_gts[scale, cls], results[cls]['num_dets'],
            f'{recalls[scale, cls]:.3f}', f'{aps[scale, cls]:.3f}'
        ] for cls in range(num_classes)]
        footer = ['mAP', '', '', '', f'{mean_ap[scale]:.3f}']
        table = AsciiTable([header] + rows + [footer])
        table.inner_footing_row_border = True
        print_log('\n' + table.table, logger=logger)
def draw(self):
    """Clear the terminal and redraw the three status tables: current info,
    route options, and cargo/route."""
    # 'cls' works on Windows; fall back to 'clear' on POSIX shells.
    res = os.system('cls')
    if res != 0:
        os.system('clear')
    header_table_data = [
        ['System', 'Station', 'Node', 'Buy Here', 'Profit'],
        [
            None if self.current['system'] is None else
            self.current['system'].name,
            None if self.current['station'] is None else
            self.current['station'].name,
            self.current['node'],
            '<none>' if self.current["node"] is None else ', '.join(
                [q['name'] for q in self.current["node"].commodities]),
            self.profit
        ]
    ]
    # Colour the header row with ANSI escape codes.
    header_table_data[0] = [
        AsciiControl.HEADER + q + AsciiControl.ENDC
        for q in header_table_data[0]
    ]
    header_table = AsciiTable(header_table_data)
    header_table.title = 'Current Info'
    print(header_table.table)
    info_table_data = [[
        'Generated Jump', 'Sell Here', 'Generated Commodity', 'Proposed Jump',
        'Proposed Commodity'
    ]]
    info_table_data[0] = [
        AsciiControl.HEADER + q + AsciiControl.ENDC for q in info_table_data[0]
    ]
    if self.rare:
        # Walk the generated and proposed routes side by side; zip_longest
        # pads the shorter one with None, rendered as empty cells.
        for loop_step, proposed_step in itertools.zip_longest(
                self.generated_route, self.proposed):
            loop_step_text = ''
            loop_step_commodity = ''
            loop_step_sell = ''
            if loop_step is not None:
                loop_step_text = f'{AsciiControl.UNDERLINE}{loop_step["node"].system.name}:{loop_step["node"].station.name}{AsciiControl.ENDC} ({round(loop_step["distance"], 2)} LY)'
                if loop_step[
                        "profit"] is not None and loop_step["profit"] > 0:
                    # Green for profits above 1000, red otherwise.
                    loop_step_commodity = f'{AsciiControl.OKBLUE + loop_step["commodity"].name + AsciiControl.ENDC}:{AsciiControl.OKGREEN if loop_step["profit"] > 1000 else AsciiControl.FAIL}{loop_step["profit"]}{AsciiControl.ENDC} Cz/T ({round(loop_step["updated"], 2)} h)'
                if loop_step['sell'].__len__() > 0:
                    selling = [
                        f"{', '.join([z['name'] for z in q.commodities])} ({round(q.system.distance(loop_step['node'].system), 2)})"
                        for q in loop_step['sell']
                    ]
                    loop_step_sell = ';\n'.join(selling)
            proposed_step_text = ''
            proposed_step_commodity = ''
            if proposed_step is not None:
                proposed_step_text = f'{AsciiControl.UNDERLINE}{proposed_step["system"]}:{proposed_step["station"]}{AsciiControl.ENDC} ({round(proposed_step["distance"], 2)} LY);'
                if proposed_step["profit"] is not None and proposed_step[
                        "profit"] > 0:
                    proposed_step_commodity = f'{AsciiControl.OKBLUE + proposed_step["buy"] + AsciiControl.ENDC} {AsciiControl.OKGREEN if proposed_step["profit"] > 1000 else AsciiControl.FAIL}{proposed_step["profit"]}{AsciiControl.ENDC} Cz/T ({round(proposed_step["updated"], 2)} h)'
            info_table_data.append([
                loop_step_text, loop_step_sell, loop_step_commodity,
                proposed_step_text, proposed_step_commodity
            ])
    cargo_table_data = [['Cargo', 'Route']]
    cargo_table_data[0] = [
        AsciiControl.HEADER + q + AsciiControl.ENDC
        for q in cargo_table_data[0]
    ]
    # Pair each held commodity with the corresponding step of the route taken.
    for commodity, made_step in itertools.zip_longest(
            self.cargo, self.route):
        commodity_text = ''
        if commodity is not None:
            # Rare commodities are shown green, ordinary ones blue.
            commodity_text = f'{AsciiControl.OKGREEN if commodity in self.rare_holder.keys() else AsciiControl.OKBLUE}{commodity}{AsciiControl.ENDC} ({self.cargo[commodity]})'
            if commodity in self.rare_holder.keys():
                dst = self.rare_controller.check_sell(
                    self.current["system"], self.rare_holder[commodity])
                if dst is not None:
                    # Flag rares that can be sold, with distance to the buyer.
                    commodity_text += f'{AsciiControl.HEADER} SELL {round(dst, 2)}{AsciiControl.ENDC}'
        made_step_text = ''
        if made_step is not None:
            made_step_text = f'{AsciiControl.UNDERLINE}{made_step["system"].name}:{made_step["station"].name}{AsciiControl.ENDC}'
        cargo_table_data.append([commodity_text, made_step_text])
    info_table = AsciiTable(info_table_data)
    cargo_table = AsciiTable(cargo_table_data)
    info_table.inner_row_border = True
    cargo_table.inner_row_border = True
    info_table.title = 'Route Options'
    cargo_table.title = 'Cargo and Route'
    print(info_table.table)
    print(cargo_table.table)
row_metrics = [ formats[metric] % yolo.metrics.get(metric, 0) for yolo in model.yolo_layers ] metric_table += [[metric, *row_metrics]] # Tensorboard logging tensorboard_log = [] for j, yolo in enumerate(model.yolo_layers): for name, metric in yolo.metrics.items(): if name != "grid_size": tensorboard_log += [(f"{name}_{j+1}", metric)] tensorboard_log += [("loss", loss.item())] # logger.list_of_scalars_summary(tensorboard_log, batches_done) log_str += AsciiTable(metric_table).table log_str += f"\nTotal loss {loss.item()}" # Determine approximate time left for epoch epoch_batches_left = len(dataloader) - (batch_i + 1) time_left = datetime.timedelta(seconds=epoch_batches_left * (time.time() - start_time) / (batch_i + 1)) log_str += f"\n---- ETA {time_left}" print(log_str) model.seen += imgs.size(0) if epoch % opt.evaluation_interval == 0: print("\n---- Evaluating Model ----")
def block_header(cur):
    """Render the ad/blocklist stats portion of the header."""

    def counts(key):
        # "enabled/total" string for one adlist counter family.
        return get(cur, key + "_enabled") + "/" + get(cur, key)

    def borderless(table):
        # Strip every border so the inner table embeds cleanly in the layout.
        table.inner_heading_row_border = False
        table.outer_border = False
        table.inner_row_border = False
        table.inner_column_border = False
        return table

    stats_rows = [
        ["Total :", counts("total_adlist")],
        ["Our Lists :", counts("our_adlist")],
        ["Others :", counts("other_adlist")],
    ]
    block_table = borderless(AsciiTable(stats_rows))

    top3_rows = [[row[0], row[1]] for row in adlist_top3_by_comment(cur)]
    t3_block_table = borderless(AsciiTable(top3_rows))

    layout = SingleTable([
        ["Ad/Blocklist Stats", "Top 3 by Comment"],
        [block_table.table, t3_block_table.table],
        [],
    ])
    layout.padding_left = 2
    layout.outer_border = False
    utils.info(layout.table)
if int(start_node) != 0: print('FIRST ROOM: %s' % start_node) else: print('Please, choose correct start room id [1..N]') return None selected_obj = [] for item in parameters[3:]: if item: print("SELECTED OBJECT: %s" % item) selected_obj.append(item) if len(selected_obj) == 0: print('Please, check json path!') return None maze = Maze(start_node, json_file, selected_obj) result = maze.start_game() return result except Exception as ex: print("Error during execution!") print(ex) return None if __name__ == "__main__": result = execute_code() if result is None: print("No result!") else: print(AsciiTable(result).table)
def test_schema():
    """Validate every SQL migration file: table creation, column comments,
    embedded JSON metadata and grants; print a per-file report table and a
    summary, and exit(1) if any hard errors were found."""
    # EXCLUSION_LIST is expected to look like '["a.sql"],["b.sql"]' — each
    # comma-separated item wrapped in [""] and unwrapped by the regex below.
    exclusions = os.getenv('EXCLUSION_LIST', '["V1__bootstrap.sql"]')
    exclusions = exclusions.split(',')
    for item in exclusions:
        # NOTE(review): non-raw pattern — '\[' and '\"' are invalid string
        # escapes (DeprecationWarning); an r'' string would be safer. Also,
        # .group(1) raises AttributeError if an item doesn't match — TODO
        # confirm the expected EXCLUSION_LIST format.
        i = re.search('\[\"(.+?)\"\]', item).group(1)
        exclusions[exclusions.index(item)] = i
    file_list, file_path = OF.findSQL()
    file_list, file_path = OF.orderSQL(file_list, file_path)
    # Drop excluded files from both parallel lists.
    for ex in exclusions:
        if ex in file_list:
            i = file_list.index(ex)
            file_list.pop(i)
            file_path.pop(i)
    table_data = [[
        Color('{autogreen}File Name{/autogreen}'), 'Create Table', 'Comments',
        'Valid JSON', 'Grants',
        Color('{red}Errors{/red}')
    ]]
    Pass_Total, Errors_Total, Flags_Total = 0, 0, 0
    ErrorFile, FlagFile = [], []
    for i in range(len(file_list)):
        print(file_list[i])
        Pass = True
        filePath = file_path[i]
        # Schema = (found_tables: bool, [table titles...]) — presumably;
        # verify against CT.find_table.
        Schema = CT.find_table(filePath)
        T = ''
        JSONcommands = ''
        Grants = ''
        Errors = ''
        if Schema[0] == True:
            # One pass per table defined in the file; the per-cell strings
            # accumulate across tables.
            for title in Schema[1]:
                T = '{green}Pass{/green}'
                columns = CT.find_columns(filePath, title)
                comments = CT.check_correct_comments(filePath, title)
                if comments[0] == True:
                    Com = '{green}Pass{/green}'
                if comments[0] == False:
                    Pass = False
                    Errors_Total += 1
                    if file_list[i] not in ErrorFile:
                        ErrorFile.append(file_list[i])
                    Com = '{red}Fail{/red}\n'
                    Err = '{red}Comments missing{/red}'
                    # List every column whose comment check failed.
                    for key in comments[1].keys():
                        if comments[1][key][0] == False:
                            Com += str(key) + '\n'
                    Errors += Err + '\n'
                # J = (parsed_ok, error_lines, complete, missing_labels,
                #      missing_descriptions) — presumably; verify against CJ.
                J = CJ.json_file_read(filePath, title)
                if J[0] == True:
                    if J[2] == True:
                        JSONcommands = '{green}Pass{/green}'
                if J[0] == False:
                    Pass = False
                    Errors_Total += 1
                    if file_list[i] not in ErrorFile:
                        ErrorFile.append(file_list[i])
                    JSONcommands = '{red}Fail:{/red}\n'
                    Err = '{red}JSON Error line(s){/red}'
                    for e in J[1]:
                        JSONcommands += str(e) + '\n'
                    Errors += Err + '\n'
                if J[2] == False:
                    # Incomplete JSON is a warning (flag), not a hard error.
                    Pass = False
                    Flags_Total += 1
                    FlagFile.append(file_list[i])
                    Err = '{yellow}Missing infomation{/yellow}'
                    JSONcommands += '{yellow}Warning:{/yellow}\n'
                    if len(J[3]) != 0:
                        JSONcommands += 'Missing label(s)\n'
                        for e in J[3]:
                            JSONcommands += str(e) + '\n'
                    if len(J[4]) != 0:
                        JSONcommands += 'Missing description(s)\n'
                        for e in J[4]:
                            JSONcommands += str(e) + '\n'
                # G = ({grant: present?}, expected_count) — presumably;
                # verify against CG.which_grants.
                G = CG.which_grants(filePath, title)
                if sum((v == True for v in G[0].values())) != G[1]:
                    Pass = False
                    Errors_Total += 1
                    if file_list[i] not in ErrorFile:
                        ErrorFile.append(file_list[i])
                    Gpass = False
                    Grants = '{red}Fail{/red}\n'
                    Err = '{red}Access Expected{/red}\n'
                    # Blue = missing grant, green = present.
                    for key in G[0].keys():
                        if G[0][key] == False:
                            Grants += '{blue}' + str(key) + '{/blue}\n'
                        if G[0][key] == True:
                            Grants += '{green}' + str(key) + '{/green}\n'
                    Errors += Err + '\n'
                else:
                    Gpass = True
                    Grants = '{green}Pass{/green}'
        if Schema[0] == False:
            # No CREATE TABLE found: flag the file, dash out every column.
            T, Com, JSONcommands, Grants, Errors = '---', '---', '---', '---', '---'
            Pass = False
            Flags_Total += 1
            FlagFile.append(file_list[i])
        # Only failing files get a row in the report; passing files are
        # just counted.
        if Pass == False:
            table_data.append([
                file_list[i],
                Color(T),
                Color(Com),
                Color(JSONcommands),
                Color(Grants),
                Color(Errors)
            ])
        else:
            Pass_Total += 1
    # TABLE_STYLE selects the terminaltables renderer ('ascii' default).
    table_style = os.environ.get('TABLE_STYLE')
    if table_style is None:
        table_style = 'ascii'
    reportTable = None
    if table_style == 'ascii':
        reportTable = AsciiTable(table_data)
    else:
        reportTable = SingleTable(table_data)
    reportTable.inner_row_border = True
    reportTable.justify_columns = {
        0: 'center',
        1: 'center',
        2: 'center',
        3: 'center',
        4: 'center',
        5: 'center'
    }
    Passes = '{green}Files Passed: ' + str(Pass_Total) + '{/green}'
    Err = '{red}Errors Found: ' + str(Errors_Total) + '{/red}'
    Flags = '{yellow}Flags Found: ' + str(Flags_Total) + '{/yellow}'
    EF = ''
    FF = ''
    # Join the error/flag file names into newline-separated cells.
    for f in ErrorFile:
        if len(EF) == 0:
            EF += str(f)
        else:
            EF += '\n' + str(f)
    ## Option to add a "Flags found" row to the report table,
    for f in FlagFile:
        if len(FF) == 0:
            FF += str(f)
        else:
            FF += '\n' + str(f)
    table_instance = None
    if table_style == 'ascii':
        table_instance = AsciiTable(
            [[Color(Passes), ''], [Color(Err), EF], [Color(Flags), FF]],
            ' Formatting Summary ')
    else:
        table_instance = SingleTable(
            [[Color(Passes), ''], [Color(Err), EF], [Color(Flags), FF]],
            ' Formatting Summary ')
    table_instance.inner_row_border = True
    print(reportTable.table)
    print(table_instance.table)
    if Errors_Total != 0:
        print('Schema validation failed')
        exit(1)
def print_table(table_data, end):
    """Print *table_data* as an ASCII table.

    Args:
        table_data: Rows (list of lists) for the table.
        end: When truthy, a blank line is printed after the table.
    """
    rendered = AsciiTable(table_data).table
    suffix = "\n" if end else ""
    print(rendered + suffix)
def print_map_summary(mean_ap, results, dataset=None, ranges=None):
    """Print mAP and per-class recall/precision/F1/AP as ASCII tables.

    One table is printed per evaluation scale, each followed by a footer
    row holding the scale's mAP.

    Args:
        mean_ap (float or list[float]): mAP value(s) calculated from
            `eval_map`; scalar input is wrapped into a one-element list.
        results (list[dict]): per-class result dicts from `eval_map`;
            each is expected to carry 'recall', 'precision', 'F1-score',
            'ap', 'num_gts' and 'num_dets' entries.
        dataset (None or str or list): dataset name (resolved through
            `get_classes`) or an explicit list of class names; None
            falls back to 1-based numeric labels.
        ranges (list or tuple or None): area ranges, one per scale;
            printed as a header line before each table when given.
    """
    # Number of scales is taken from the first class's 'ap' entry:
    # an ndarray means one AP per scale, anything else means a single scale.
    num_scales = len(results[0]['ap']) if isinstance(results[0]['ap'], np.ndarray) else 1
    if ranges is not None:
        assert len(ranges) == num_scales
    num_classes = len(results)
    # Per-(scale, class) metric matrices, filled from `results` below.
    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
    f1s = np.zeros((num_scales, num_classes), dtype=np.float32)  # modify by Lichao Wang
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            # ndmin=2 tolerates both 1-D and 2-D inputs; [:, -1] keeps the
            # value at the final threshold for every scale.
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
            precisions[:, i] = np.array(cls_result['precision'], ndmin=2)[:, -1]
            f1s[:, i] = np.array(cls_result['F1-score'], ndmin=2)[:, -1]  # modify by Lichao Wang
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']
    # Resolve display names for the class column.
    if dataset is None:
        label_names = [str(i) for i in range(1, num_classes + 1)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset
    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]
    header = ['class', 'gts', 'dets', 'recall', 'precision', 'f1-score', 'ap']  # modify by Lichao Wang
    # One table per scale; metrics are formatted to three decimals.
    for i in range(num_scales):
        if ranges is not None:
            print("Area range ", ranges[i])
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                '{:.3f}'.format(recalls[i, j]),
                '{:.3f}'.format(precisions[i, j]),
                '{:.3f}'.format(f1s[i, j]),  # modify by Lichao Wang
                '{:.3f}'.format(aps[i, j])
            ]
            table_data.append(row_data)
        # Footer row holds only the mAP for this scale.
        table_data.append(
            ['mAP', '', '', '', '', '', '{:.3f}'.format(mean_ap[i])])  # modify by Lichao Wang
        table = AsciiTable(table_data)
        # Draw a separator line above the mAP footer row.
        table.inner_footing_row_border = True
        print(table.table)
"Your car can travel at most 'm' miles on a full tank and you start witha full tank\n"+ "Along your way, there are gas stations at distances stop1, stop2,...,stopn from your home city\n"+ "What is the minimum number of refills needed?"], ['Input', 'The firest line contains an iteger d. The second line contains an integer m.\n'+ 'The third line specifies an integer n. Finally the last line contaisn integers stop1, stop2, stop3,...,stopn\n'+ 'value and the weight of i-th item, respectively'], ['Output Format', 'Minimun number of refills needed. If it is not possible to reach destination output -1'], ['Constraints', f'{boundary_input[0][0]}<=d<={boundary_input[0][1]}; {boundary_input[1][0]}<=m<={boundary_input[1][1]};\n'+ f'{boundary_input[2][0]}<=n<={boundary_input[2][1]}; {boundary_input[3][0]}<stop1<stop2<...<stopn<={boundary_input[3][1]}'], ['Sample', f'input {sample_input_1}, output {sample_output_1}. {sample_1_text}.'], ['Sample', f'input {sample_input_2}, output {sample_output_2}. {sample_2_text}.'], ['Sample', f'input {sample_input_3}, output {sample_output_3}. {sample_3_text}.']] table = AsciiTable(table_data) table.inner_heading_row_border = False print(table.table+"\n") while True: choice = None while choice not in ('s','b','t','a','x'): print("Choose an option:\n") print(f"{Style.BRIGHT}s{Style.RESET_ALL} Execute Sample tests - Test algorithm only") print(f"{Style.BRIGHT}b{Style.RESET_ALL} Execute Boundary tests - Test algorithm only") if DUMMY_MODEL: print(f"{Style.BRIGHT}t{Style.RESET_ALL} Execute Stress tests - Test algorithm vs 'Dummy' algorithm") print(f"{Style.BRIGHT}a{Style.RESET_ALL} Execute all tests without prompts") print(f"{Style.BRIGHT}x{Style.RESET_ALL} Exit") choice = input() print()
def make_player_models(original_model, model_name, final_model=False):
    """Evaluate `original_model` per player and persist per-player loss records.

    For every player row in the salary file, maps that player's data,
    splits it into train/val/test, computes the original model's MSE on
    the test split, saves the loss record under
    ``model_name/<player_basketball_reference_id>`` via `save_model`, and
    finally prints all records sorted by RMSE as both plain text and an
    ASCII table.

    Positional arguments:
    original_model -- trained model exposing ``predict(x)``.
    model_name -- directory-style prefix under which per-player records
        are saved.

    Keyword arguments:
    final_model -- when True, the full mapped dataset replaces the train
        split (val/test splits are still computed for evaluation).
    """
    df = scraping.parse_salary_file()
    if PLAYER_LOSS_PLAYER_LIMIT:
        # Optional cap on how many players are processed (debug/CI runs).
        df = df[0:PLAYER_LOSS_PLAYER_LIMIT]
    losses = []
    widgets = [
        ' [', progressbar.Timer(), '] ',
        progressbar.Bar(),
        ' (', progressbar.ETA(), ') ',
    ]
    for i in progressbar.progressbar(range(len(df)), widgets=widgets):
        row = df.iloc[i]
        player_basketball_reference_id = row['basketball_reference_id']
        mapped_data = data.get_mapped_data(
            player_basketball_reference_id=player_basketball_reference_id)
        # Skip players without enough games to evaluate meaningfully.
        if len(mapped_data['x']) < PLAYER_MIN_NUMBER_GAMES_PLAYED:
            continue
        # First split carves out the test set; second carves a validation
        # set out of the remaining training data.
        x_train, x_test, y_train, y_test, sw_train, sw_test = train_test_split(
            mapped_data['x'],
            mapped_data['y'],
            mapped_data['sw'],
            test_size=0.2,
            random_state=0)
        x_train, x_val, y_train, y_val, sw_train, sw_val = train_test_split(
            x_train, y_train, sw_train, test_size=0.2, random_state=0)
        if final_model:
            # Train on everything when producing the final model.
            x_train = mapped_data['x']
            y_train = mapped_data['y']
            sw_train = mapped_data['sw']

        # NOTE: disabled per-player fine-tuning experiment, kept for
        # reference; it cloned the original model, froze all but the last
        # five layers, re-fit, and scored the fine-tuned model ('mse').
        # model = make_model(input_dim=len(x_train[0]))
        # model.set_weights(original_model.get_weights())
        # for layer in model.layers[:-5]:
        #     layer.trainable = False
        # if os.environ.get('DEBUG') is not None:
        #     for layer in model.layers:
        #         print(layer, layer.trainable)
        #
        # model.compile('adam', loss='mse', metrics=['mse'])
        # model.fit(
        #     x=x_train,
        #     y=y_train,
        #     sample_weight=sw_train,
        #     batch_size=PLAYER_BATCH_SIZE,
        #     epochs=PLAYER_EPOCHS,
        #     verbose=0 if os.environ.get('FINAL', False) else 1,
        #     validation_data=(x_val, y_val, sw_val),
        #     callbacks=[
        #         EarlyStopping(patience=5)
        #     ]
        # )
        #
        # y_pred = model.predict(x_test)
        # mse = mean_squared_error(y_test, y_pred, sample_weight=sw_test)

        y_pred_og = original_model.predict(x_test)
        mse_og = mean_squared_error(y_test, y_pred_og, sample_weight=sw_test)
        player_losses = {
            'player_basketball_reference_id': player_basketball_reference_id,
            'train_samples': len(x_train),
            'test_samples': len(x_test),
            'mse_og': mse_og,
            'rmse_og': mse_og ** 0.5,
            # 'mse': mse,
            # 'rmse': mse ** 0.5
        }
        save_model(None, model_name + '/' + player_basketball_reference_id,
                   player_losses)
        losses.append(player_losses)

    # Bug fix: previously `losses[0]` raised IndexError when every player
    # was filtered out by PLAYER_MIN_NUMBER_GAMES_PLAYED.
    if not losses:
        print('No players met the minimum games threshold; nothing to report.')
        return

    losses = sorted(losses, key=lambda k: k['rmse_og'])
    table_data = (
        [list(losses[0].keys())]
        + [list(loss_row.values()) for loss_row in losses])
    print('\n'.join([' '.join([str(c) for c in r]) for r in table_data]))
    table = AsciiTable(table_data)
    print(table.table)
# Benchmark inference time for the original (pruned-channels) model and the
# two compacted models over the same random input.
print('testing inference time...')
# obtain_avg_forward_time presumably returns (avg seconds, model output) — TODO confirm.
pruned_forward_time, output = obtain_avg_forward_time(random_input, model)
compact_forward_time1, compact_output1 = obtain_avg_forward_time(random_input, compact_model1)
compact_forward_time2, compact_output2 = obtain_avg_forward_time(random_input, compact_model2)
# Summary table comparing mAP, parameter count and inference time across the
# three pruning stages.
metric_table = [
    ["Metric", "Before", "After prune channels", "After prune layers(final)"],
    ["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric1[0][2]:.6f}', f'{compact_model_metric2[0][2]:.6f}'],
    ["Parameters", f"{origin_nparameters}", f"{compact_nparameters1}", f"{compact_nparameters2}"],
    ["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time1:.4f}', f'{compact_forward_time2:.4f}']
]
print(AsciiTable(metric_table).table)
# Derive an output cfg path by injecting the pruning settings into the last
# path segment of the original cfg path ('/' replace hits every separator —
# assumes a single-level relative path like 'cfg/yolov3.cfg').
pruned_cfg_name = opt.cfg.replace('/', f'/prune_{opt.global_percent}_keep_{opt.layer_keep}_{opt.shortcuts}_shortcut_')
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
# Same renaming scheme for the weights file; PyTorch checkpoints are saved
# with the darknet-style '.weights' extension instead of '.pt'.
compact_model_name = opt.weights.replace('/', f'/prune_{opt.global_percent}_keep_{opt.layer_keep}_{opt.shortcuts}_shortcut_')
if compact_model_name.endswith('.pt'):
    compact_model_name = compact_model_name.replace('.pt', '.weights')
save_weights(compact_model2, path=compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
# Remaining CLI options (parser is created earlier in the file).
parser.add_argument('--record_dir', default="./result")
parser.add_argument('--record_file', default="result.txt")
parser.add_argument('--fl_gamma', default=1.5, type=float, help="focal_loss gamma")
parser.add_argument('--save_interval', default=10, type=int)  # checkpoint cadence (epochs) — TODO confirm unit
parser.add_argument('--val_interval', default=10, type=int)   # validation cadence — TODO confirm unit
parser.add_argument("--only_val", action="store_true")
parser.add_argument('--only_test', action="store_true")
parser.add_argument('--nms_conf_thresh', default=0.05, type=float)
parser.add_argument('--nms_iou_thresh', default=0.05, type=float)
parser.add_argument('--pred_file', default="pred.json")
opt = parser.parse_args()
# Echo every parsed option as a two-column ASCII table.
print(AsciiTable([[key, vars(opt)[key]] for key in vars(opt)]).table)

device = torch_utils.select_device(opt.device, batch_size=opt.batch_size)

# Unpack frequently-used options into module-level names.
cfg = opt.cfg
data = opt.data
maxdata = opt.maxdata
# When resuming, fall back to the last saved checkpoint path.
opt.weights = last if opt.resume else opt.weights
batch_size = opt.batch_size
img_size = opt.img_size
epochs = opt.epochs
weights = opt.weights  # initial training weights, can be .
nms_conf_thresh = opt.nms_conf_thresh
nms_iou_thresh = opt.nms_iou_thresh
# set print results to both terminal and record_file
record_dir = opt.record_dir
record_file = opt.record_file
def show_summary(self, summary_table):
    """Render ``summary_table`` as an ASCII table titled "Summary" and print it.

    Positional arguments:
    summary_table -- list of row lists accepted by AsciiTable.
    """
    sum_table = AsciiTable(summary_table, "Summary")
    # Bug fix: original used the Python 2 print statement
    # (`print sum_table.table`), a SyntaxError under Python 3,
    # which the rest of this file targets.
    print(sum_table.table)