# This is the function that asks the choice of a player and links to the
# board squares.
# This function is part of main()
import pickle
from beautifultable import BeautifulTable

# Render the numbered 3x3 board so the player can pick a square.
board = BeautifulTable()
board.append_row(['1', '2', '3'])
board.append_row(['4', '5', '6'])
board.append_row(['7', '8', '9'])
print(board)


def board_coordinates(choice):
    """Map a square label '1'-'9' to its (row, col) position on the 3x3 board.

    Args:
        choice: the square label entered by the player (a one-character string).

    Returns:
        (row, col) tuple of ints for a valid label; None for any other input
        (matching the original chain's implicit fall-through).
    """
    # The original hand-written if/elif chain was truncated after '5'.
    # Its visible cases ('1'->(0,0), '2'->(0,1), '3'->(0,2), '4'->(1,0))
    # are row-major order, so the whole table is a single divmod.
    # The dead per-branch `square_taken` list (built and discarded) is removed.
    if choice in {'1', '2', '3', '4', '5', '6', '7', '8', '9'}:
        return divmod(int(choice) - 1, 3)
    return None
def plan_matrix(n, m):
    """Build the design matrices for a central composite experiment plan.

    Args:
        n: number of design points (rows).
        m: number of response repetitions per row.

    Returns:
        (x, y, x_norm): naturalized plan matrix, random response matrix,
        and the normalized (coded) plan matrix.

    NOTE(review): relies on module-level names `np`, `random`, `ccdesign`
    (pyDOE), `BeautifulTable`, and the globals `y_min`, `y_max`, `x_range`
    — confirm they are defined/imported at module scope.
    """
    # Response matrix: n x m random integers in [y_min, y_max].
    y = np.zeros(shape=(n, m))
    for i in range(n):
        for j in range(m):
            y[i][j] = random.randint(y_min, y_max)
    # Extra center points once n exceeds the 14 base rows of the design.
    if n > 14:
        no = n - 14
    else:
        no = 1
    x_norm = ccdesign(3, center=(0, no))
    # Column 0 is the constant (intercept) term.
    x_norm = np.insert(x_norm, 0, 1, axis=1)
    # Reserve columns 4..10 for interaction and squared terms (filled below).
    for i in range(4, 11):
        x_norm = np.insert(x_norm, i, 0, axis=1)
    # Star-point arm length; coded levels outside [-1, 1] are clamped to +/-l.
    l = 1.215
    for i in range(len(x_norm)):
        for j in range(len(x_norm[i])):
            if x_norm[i][j] < -1 or x_norm[i][j] > 1:
                if x_norm[i][j] < 0:
                    x_norm[i][j] = -l
                else:
                    x_norm[i][j] = l

    def add_sq_nums(x):
        # Fill interaction columns (4-7) and squared columns (8-10) from
        # the three factor columns 1-3; mutates x in place and returns it.
        for i in range(len(x)):
            x[i][4] = x[i][1] * x[i][2]
            x[i][5] = x[i][1] * x[i][3]
            x[i][6] = x[i][2] * x[i][3]
            x[i][7] = x[i][1] * x[i][3] * x[i][2]
            x[i][8] = x[i][1]**2
            x[i][9] = x[i][2]**2
            x[i][10] = x[i][3]**2
        return x

    x_norm = add_sq_nums(x_norm)
    # Naturalized plan: rows 0-7 are factorial corners (min/max of each
    # factor's range); rows 8+ start from the center of each range.
    # NOTE(review): dtype=np.int64 truncates the fractional star-point
    # coordinates assigned below — confirm this is intended.
    x = np.ones(shape=(len(x_norm), len(x_norm[0])), dtype=np.int64)
    for i in range(8):
        for j in range(1, 4):
            if x_norm[i][j] == -1:
                x[i][j] = x_range[j - 1][0]
            else:
                x[i][j] = x_range[j - 1][1]
    for i in range(8, len(x)):
        # NOTE(review): range(1, 3) leaves column 3 of the center rows at
        # the np.ones default — possibly meant range(1, 4); confirm.
        for j in range(1, 3):
            x[i][j] = (x_range[j - 1][0] + x_range[j - 1][1]) / 2
    # Half-width of each factor range (distance from center to max).
    dx = [
        x_range[i][1] - (x_range[i][0] + x_range[i][1]) / 2
        for i in range(3)
    ]
    # Star points: rows 8-13 offset by +/- l*dx around the center values.
    # NOTE(review): x[8][1] reads x[9][1] *before* x[9][1] is overwritten
    # on the next line — the statement order here is load-bearing.
    x[8][1] = l * dx[0] + x[9][1]
    x[9][1] = -l * dx[0] + x[9][1]
    x[10][2] = l * dx[1] + x[9][2]
    x[11][2] = -l * dx[1] + x[9][2]
    x[12][3] = l * dx[2] + x[9][3]
    x[13][3] = -l * dx[2] + x[9][3]
    x = add_sq_nums(x)
    # Pretty-print both matrices for inspection.
    x_table = BeautifulTable()
    for i in range(n):
        x_table.rows.append([*x[i]])
    print('Х matrix:')
    print(x_table)
    x_norm_table = BeautifulTable()
    for i in range(n):
        x_norm_table.rows.append([*x_norm[i]])
    print('Normalized x matrix:')
    print(x_norm_table)
    return x, y, x_norm
import mysql.connector
import timeit
import numpy as np
import math
from beautifultable import BeautifulTable
from mysql.connector import errorcode

# RDS connection settings (credentials redacted in this copy).
config = {
    'host': 'dbinstance.c6zmxwufoiot.us-east-2.rds.amazonaws.com',
    'user': '******',
    'password': '******',
    'database': 'test'
}

# Result table: one row per connection/query timing sample.
t = BeautifulTable()
t.column_headers = ["Connection #", "Query", "time"]
times = []

# Open 20 successive connections, reporting auth/DB errors individually.
for i in range(0, 20):
    try:
        conn = mysql.connector.connect(**config)
        #print("Connection established")
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with the user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    else:
        cursor = conn.cursor()
        # NOTE(review): excerpt ends here — the query/timing body that
        # fills `times` and `t` continues beyond this chunk.
def upgrade_package_test(
    dl_opts: DownloadOptions,
    new_version,
    old_version,
    new_dlstage,
    old_dlstage,
    git_version,
    editions,
    test_driver
):
    """Fetch old/new packages and run the full upgrade test matrix.

    For every EXECUTION_PLAN entry whose edition is enabled, downloads the
    old and new package versions, runs upgrade / conflict / license-manager
    / debugger suites, and renders a result table (also written to
    testfailures.txt). Exits the process with status 1 on any suite
    failure; returns 0 otherwise.

    NOTE(review): relies on module-level names (EXECUTION_PLAN, Download,
    RunProperties, semver, lh, BeautifulTable, ALIGN_LEFT, Path, sys, the
    tar helpers, ...) imported elsewhere in this file.
    """
    test_driver.set_r_limits()
    lh.configure_logging(test_driver.base_config.verbose)
    list_all_processes()
    test_dir = test_driver.base_config.test_data_dir
    versions = {}
    fresh_versions = {}
    # Tar file caching version state between runs, so an identical
    # version pair is not re-tested.
    version_state_tar = get_tar_file_path(
        test_driver.launch_dir, [old_version, new_version], test_driver.get_packaging_shorthand()
    )
    read_versions_tar(version_state_tar, versions)
    print(versions)
    results = []
    # do the actual work:
    for props in EXECUTION_PLAN:
        print("Cleaning up" + props.testrun_name)
        test_driver.run_cleanup(props)
    print("Cleanup done")
    for props in EXECUTION_PLAN:
        # Skip editions not requested by the caller.
        if props.directory_suffix not in editions:
            continue
        # pylint: disable=unused-variable
        dl_old = Download(
            dl_opts,
            test_driver.base_config.hb_cli_cfg,
            old_version,
            props.enterprise,
            test_driver.base_config.zip_package,
            test_driver.base_config.src_testing,
            old_dlstage,
            versions,
            fresh_versions,
            git_version,
        )
        dl_new = Download(
            dl_opts,
            test_driver.base_config.hb_cli_cfg,
            new_version,
            props.enterprise,
            test_driver.base_config.zip_package,
            test_driver.base_config.src_testing,
            new_dlstage,
            versions,
            fresh_versions,
            git_version,
        )
        # Neither version changed since the last recorded run -> nothing to do.
        if not dl_new.is_different() or not dl_old.is_different():
            print("we already tested this version. bye.")
            return 0
        dl_old.get_packages(dl_old.is_different())
        dl_new.get_packages(dl_new.is_different())
        this_test_dir = test_dir / props.directory_suffix
        test_driver.reset_test_data_dir(this_test_dir)
        results.append(test_driver.run_upgrade([dl_old.cfg.version, dl_new.cfg.version], props))
        # Conflict tests run once for each edition flavor.
        for use_enterprise in [True, False]:
            results.append(
                test_driver.run_conflict_tests(
                    [dl_old.cfg.version, dl_new.cfg.version],
                    enterprise=use_enterprise,
                )
            )
        results.append(
            test_driver.run_license_manager_tests(
                [semver.VersionInfo.parse(dl_old.cfg.version), semver.VersionInfo.parse(dl_new.cfg.version)]
            )
        )
        # Debugger tests run twice, toggling the first RunProperties flag.
        results.append(
            test_driver.run_debugger_tests(
                [semver.VersionInfo.parse(dl_old.cfg.version), semver.VersionInfo.parse(dl_new.cfg.version)],
                run_props=RunProperties(True, False, False),
            )
        )
        results.append(
            test_driver.run_debugger_tests(
                [semver.VersionInfo.parse(dl_old.cfg.version), semver.VersionInfo.parse(dl_new.cfg.version)],
                run_props=RunProperties(False, False, False),
            )
        )
    print("V" * 80)
    # Aggregate every suite result into one report table; any failed
    # result flips `status` and triggers a non-zero exit below.
    status = True
    table = BeautifulTable(maxwidth=140)
    for one_suite_result in results:
        if len(one_suite_result) > 0:
            for one_result in one_suite_result:
                if one_result["success"]:
                    table.rows.append(
                        [
                            one_result["testrun name"],
                            one_result["testscenario"],
                            # one_result['success'],
                            "\n".join(one_result["messages"]),
                        ]
                    )
                else:
                    # Failures additionally carry their progress log.
                    table.rows.append(
                        [
                            one_result["testrun name"],
                            one_result["testscenario"],
                            # one_result['success'],
                            "\n".join(one_result["messages"])
                            + "\n"
                            + "H" * 40
                            + "\n"
                            + one_result["progress"],
                        ]
                    )
                status = status and one_result["success"]
    table.columns.header = [
        "Testrun",
        "Test Scenario",
        # 'success', we also have this in message.
        "Message + Progress",
    ]
    table.columns.alignment["Message + Progress"] = ALIGN_LEFT
    tablestr = str(table)
    Path("testfailures.txt").write_text(tablestr, encoding="utf8")
    if not status:
        print("exiting with failure")
        sys.exit(1)
    # Only refresh the cached version state when everything succeeded.
    if dl_opts.force:
        touch_all_tars_in_dir(version_state_tar)
    else:
        write_version_tar(version_state_tar, fresh_versions)
    print(tablestr)
    return 0
def editItem(self, indexNumber, table, tablePrint, resultsText):
    # function to edit items from a table
    #
    # Walks the user through re-entering (or keeping) each field of the
    # item at `indexNumber` in `table`, writes the result via dbEditItem,
    # then refreshes `resultsText` with the table's current contents.
    global partsColumnHeaders, brainsColumnHeaders, gearsColumnHeaders, motorsColumnHeaders, wheelsColumnHeaders, othersColumnHeaders
    columnHeadersList = [partsColumnHeaders, brainsColumnHeaders, gearsColumnHeaders, motorsColumnHeaders, wheelsColumnHeaders, othersColumnHeaders]
    global partValues, brainValues, gearValues, motorValues, wheelValues, otherValues
    itemValuesList = [partValues, brainValues, gearValues, motorValues, wheelValues, otherValues]
    tableList = ['Parts', 'Brains', 'Gears', 'Motors', 'Wheels', 'Others']
    data = []
    i = 0
    print(table)
    var = IntVar()
    # Look up the headers/values matching the requested table name.
    # NOTE(review): the `else: i = i + 1` is redundant — the for loop
    # already advances i; it has no effect on behaviour.
    for i in range(len(tableList)):
        if tableList[i] == table:
            itemValues = itemValuesList[i]
            columnHeaders = columnHeadersList[i]
            break
        else:
            i = i + 1

    def clicked():
        # Button callback: capture the entry value (or the old value when
        # the user types "keep") and release wait_variable below.
        # NOTE(review): reads the enclosing loop's `i` and `valueEntryBox`
        # late — safe only because wait_variable makes each iteration
        # synchronous; confirm before refactoring.
        value = valueEntryBox.get()
        var.set(1)
        if value.lower() == 'keep':
            value = tablePrint[int(indexNumber) - 1]['{}'.format(columnHeaders[i + 1])]
        print(value)
        data.append(value)

    i = 0
    for i in range(len(itemValues)):
        # loop that allows user to input new values or keep the old ones for an item
        valueForEditText = Label(self, text='Input "keep" to keep the same value or input new value for : ')
        valueForEditText.place(x=250, y=350)
        valueForEdit = Text(self, height=1, width=20)
        valueForEdit.insert(END, itemValues[i])
        valueForEdit.place(x=575, y=350)
        valueEntryBox = Entry(root, width=20)
        valueEntryBox.place(x=250, y=375)
        confirmValueButton = Button(self, text='Enter')
        confirmValueButton.config(command=clicked)
        confirmValueButton.place(x=400, y=375)
        # Block until clicked() fires, so fields are gathered in order.
        confirmValueButton.wait_variable(var)
        i = i + 1
    # The record id goes last; dbEditItem expects it in that position.
    data.append(indexNumber)
    print(data)
    dbEditItem(data, table)
    successEditLabel = Label(self, text='Edit was successful')
    successEditLabel.place(x=250, y=400)
    # Re-query and re-render the whole table into the results widget.
    results = dbSearchAll(table)
    tablePrint = BeautifulTable()
    tablePrint.set_style(BeautifulTable.STYLE_COMPACT)
    tablePrint.column_headers = columnHeaders
    tablePrint.set_padding_widths(1)
    for row in results:
        memberList = []
        for member in row:
            memberList.append(member)
        tablePrint.append_row(memberList)
    resultsText.delete('1.0', END)
    resultsText.insert(END, tablePrint)
    resultsText.update()
def main():
    """Parse CLI args, fetch task statistics, and print summary tables.

    NOTE(review): relies on module-level `argparse`, `np`, `BeautifulTable`
    and the `get_stats` helper defined elsewhere in this file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('db')
    parser.add_argument('mode')
    parser.add_argument('tq_rank')
    parser.add_argument('--qid_min', type=int)
    parser.add_argument('--qid_max', type=int)
    parser.add_argument('--tqc_min', type=float)
    parser.add_argument('--tqc_max', type=float)
    parser.add_argument('--user_time', type=float, default=1)
    args = parser.parse_args()
    stats = get_stats(args.db, args.mode, args.tq_rank, qid_min=args.qid_min, qid_max=args.qid_max, tqc_min=args.tqc_min, tqc_max=args.tqc_max, user_time=args.user_time)
    # Cumulative counts of tasks finishing within k iterations.
    # NOTE(review): `iters_else` counts ALL non-None entries yet is
    # labelled "# Tasks >= 6 Iter" below; and the comparisons/percentiles
    # assume stats['iters'] holds no None despite `failed` counting None
    # entries — confirm get_stats' contract.
    # iters_0 = sum(i == 0 for i in stats['iters'])
    iters_1 = sum(i <= 1 for i in stats['iters'])
    iters_2 = sum(i <= 2 for i in stats['iters'])
    iters_3 = sum(i <= 3 for i in stats['iters'])
    iters_4 = sum(i <= 4 for i in stats['iters'])
    iters_5 = sum(i <= 5 for i in stats['iters'])
    iters_else = sum(i is not None for i in stats['iters'])
    failed = sum(i is None for i in stats['iters'])
    # Iter boxplot data
    iter_q1 = np.percentile(stats['iters'], 25)
    iter_q2 = np.percentile(stats['iters'], 50)
    iter_q3 = np.percentile(stats['iters'], 75)
    iter_iqr = iter_q3 - iter_q1
    # Split iteration counts into Tukey outliers (outside 1.5*IQR) and
    # the "normal" range used for whisker min/max.
    iter_outliers = []
    iter_normal = []
    for it_r in stats['iters']:
        if it_r >= iter_q1 - (1.5 * iter_iqr) and it_r <= iter_q3 + (1.5 * iter_iqr):
            iter_normal.append(it_r)
        else:
            iter_outliers.append(it_r)
    iter_max = max(iter_normal)
    iter_min = min(iter_normal)
    # Table 1: task-level counts and CQ statistics.
    table = BeautifulTable()
    table.column_headers = ['TASK INFO', 'VALUE']
    table.column_alignments['TASK INFO'] = BeautifulTable.ALIGN_LEFT
    table.column_alignments['VALUE'] = BeautifulTable.ALIGN_RIGHT
    table.row_separator_char = ''
    table.append_row(['Total Results', '{}'.format(stats['total'])])
    table.append_row(['Analyzed Results', '{}'.format(stats['analyzed'])])
    table.append_row(['Mean TQC', '{}'.format(np.mean(stats['tqcs']))])
    table.append_row(['Min Total CQ #', '{:.3f}'.format(np.min(stats['cq_counts']))])
    table.append_row(['Mean Total CQ #', '{:.3f}'.format(np.mean(stats['cq_counts']))])
    table.append_row(['Max Total CQ #', '{:.3f}'.format(np.max(stats['cq_counts']))])
    print(table)
    # Table 2: iteration distribution.
    table = BeautifulTable()
    table.column_headers = ['ITER INFO', 'VALUE']
    table.column_alignments['ITER INFO'] = BeautifulTable.ALIGN_LEFT
    table.column_alignments['VALUE'] = BeautifulTable.ALIGN_RIGHT
    table.row_separator_char = ''
    # table.append_row(['# Tasks <= 0 Iter (%)', '{} ({:.2f}%)'.format(iters_0, iters_0 / stats['analyzed'] * 100)])
    table.append_row(['# Tasks <= 1 Iter (%)', '{} ({:.2f}%)'.format(iters_1, iters_1 / stats['analyzed'] * 100)])
    table.append_row(['# Tasks <= 2 Iter (%)', '{} ({:.2f}%)'.format(iters_2, iters_2 / stats['analyzed'] * 100)])
    table.append_row(['# Tasks <= 3 Iter (%)', '{} ({:.2f}%)'.format(iters_3, iters_3 / stats['analyzed'] * 100)])
    table.append_row(['# Tasks <= 4 Iter (%)', '{} ({:.2f}%)'.format(iters_4, iters_4 / stats['analyzed'] * 100)])
    table.append_row(['# Tasks <= 5 Iter (%)', '{} ({:.2f}%)'.format(iters_5, iters_5 / stats['analyzed'] * 100)])
    table.append_row(['# Tasks >= 6 Iter (%)', '{} ({:.2f}%)'.format(iters_else, iters_else / stats['analyzed'] * 100)])
    table.append_row(['# Failed Tasks (%)', '{} ({:.2f}%)'.format(failed, failed / stats['analyzed'] * 100)])
    table.append_row(['Min # Iters', '{:.3f}'.format(iter_min)])
    table.append_row(['First Quartile # Iters', '{:.3f}'.format(iter_q1)])
    table.append_row(['Median # Iters', '{:.3f}'.format(iter_q2)])
    table.append_row(['Third Quartile # Iters', '{:.3f}'.format(iter_q3)])
    table.append_row(['Max # Iters', '{:.3f}'.format(iter_max)])
    table.append_row(['Outlier Iters', '{}'.format(iter_outliers)])
    table.append_row(['Std. Dev. # Iters', '{:.3f}'.format(np.std(stats['iters']))])
    table.append_row(['Mean # Iters', '{:.3f}'.format(np.mean(stats['iters']))])
    table.append_row(['Mean Iter/CQ Ratio', '{:.3f}'.format(np.mean(stats['iter_cq_ratios']))])
    print(table)
    # Table 3: timing statistics.
    table = BeautifulTable()
    table.column_headers = ['TIME INFO', 'VALUE']
    table.column_alignments['TIME INFO'] = BeautifulTable.ALIGN_LEFT
    table.column_alignments['VALUE'] = BeautifulTable.ALIGN_RIGHT
    table.row_separator_char = ''
    table.append_row(['Mean Executed CQ #', '{:.3f}'.format(np.mean(stats['exec_cq_counts']))])
    table.append_row(['Mean Query Time', '{:.3f}s'.format(np.mean(stats['query_times']))])
    table.append_row(['Mean Computation Time', '{:.3f}s'.format(np.mean(stats['comp_times']))])
    table.append_row(['Median Total Time', '{:.3f}s'.format(np.median(stats['total_times']))])
    table.append_row(['Mean Total Time', '{:.3f}s'.format(np.mean(stats['total_times']))])
    table.append_row(['Max Total Time', '{:.3f}s'.format(np.max(stats['total_times']))])
    table.append_row(['Mean Total Time/Iter', '{:.3f}s'.format(np.mean(stats['times_per_iter']))])
    table.append_row(['Max Total Time/Iter', '{:.3f}s'.format(np.max(stats['times_per_iter']))])
    table.append_row(['Avg. Max Single Iter', '{:.3f}s'.format(np.mean(stats['max_iter_times']))])
    print(table)
    # Table 4: the 11 slowest tasks by total time.
    table = BeautifulTable()
    table.column_headers = ['SLOWEST TASKS', 'TIME']
    table.column_alignments['SLOWEST TASKS'] = BeautifulTable.ALIGN_LEFT
    table.column_alignments['TIME'] = BeautifulTable.ALIGN_RIGHT
    table.row_separator_char = ''
    for idx in np.argsort(stats['total_times'])[::-1][:11]:
        table.append_row([stats['qids'][idx], stats['total_times'][idx]])
    print(table)
def do_l(self, arg):
    'Lists current portfolio'
    # NOTE(review): Python 2 code (print statements) — do not run under
    # Python 3 without porting.
    portfolio = self.trader.portfolios()
    # Prefer extended-hours equity when the field is populated.
    if portfolio['extended_hours_equity']:
        equity = float(portfolio['extended_hours_equity'])
    else:
        equity = float(portfolio['equity'])
    print 'Equity Value: %.2f' % equity
    previous_close = float(portfolio['adjusted_equity_previous_close'])
    change = equity - previous_close
    print '%s%.2f Today (%.2f%%)' % (('+' if change > 0 else ''), change,
                                     change / previous_close * 100.0)
    account_details = self.trader.get_account()
    if 'margin_balances' in account_details:
        print 'Buying Power:', account_details['margin_balances'][
            'unallocated_margin_cash']
    # Load Stocks
    positions = self.trader.securities_owned()
    symbols = [
        self.get_symbol(position['instrument'])
        for position in positions['results']
    ]
    # Latest price per symbol, preferring the extended-hours trade price.
    quotes_data = {}
    if len(symbols) > 0:
        raw_data = self.trader.quotes_data(symbols)
        for quote in raw_data:
            if quote['last_extended_hours_trade_price']:
                price = quote['last_extended_hours_trade_price']
            else:
                price = quote['last_trade_price']
            quotes_data[quote['symbol']] = price
    table = BeautifulTable()
    table.column_headers = [
        "symbol", "current price", "quantity", "total equity", "cost basis",
        "p/l"
    ]
    for position in positions['results']:
        quantity = int(float(position['quantity']))
        symbol = self.get_symbol(position['instrument'])
        price = quotes_data[symbol]
        total_equity = float(price) * quantity
        buy_price = float(position['average_buy_price'])
        p_l = total_equity - buy_price * quantity
        table.append_row(
            [symbol, price, quantity, total_equity, buy_price, p_l])
    print "Stocks:"
    print(table)
    # Load Options
    option_positions = self.trader.options_owned()
    table = BeautifulTable()
    table.column_headers = [
        "option", "price", "quantity", "equity", "cost basis", "p/l"
    ]
    for op in option_positions:
        quantity = float(op['quantity'])
        if quantity == 0:
            continue
        cost = float(op['average_price'])
        # Short positions are represented with negated quantity and cost.
        if op['type'] == 'short':
            quantity = -quantity
            cost = -cost
        instrument = op['option']
        option_data = self.trader.session.get(instrument).json()
        expiration_date = option_data['expiration_date']
        strike = float(option_data['strike_price'])
        # NOTE(review): `type` shadows the builtin of the same name.
        type = option_data['type']
        symbol = op[
            'chain_symbol'] + ' ' + expiration_date + ' ' + type + ' $' + str(
                strike)
        info = self.trader.get_option_marketdata(instrument)
        last_price = float(info['adjusted_mark_price'])
        # One contract covers 100 shares, hence the factor of 100.
        total_equity = (100 * last_price) * quantity
        change = total_equity - (float(cost) * quantity)
        table.append_row(
            [symbol, last_price, quantity, total_equity, cost, change])
    print "Options:"
    print(table)
# NOTE(review): top-level fragment of a longer simulation script — the
# *_storage lists, *_total_requests / *_allocation_num counters and the
# loop index `i` referenced below are defined elsewhere in the file.
# Reset per-game counters and equilibrium trackers for both populations.
MF2 = 0
Allocation_frequency = 0
ONE_cc_eq, ONE_cd_eq, ONE_dc_eq, ONE_dd_eq = 0, 0, 0, 0
TWO_cc_eq, TWO_cd_eq, TWO_dc_eq, TWO_dd_eq = 0, 0, 0, 0
ONE_eq_strat, TWO_eq_strat = [], []
ONE_P1_broker_support, ONE_P2_broker_support, TWO_P1_broker_support, TWO_P2_broker_support = 0, 0, 0, 0
print("TRANSFER GAME COMPLETED. NUMBER OF EPISODES:", i)
# Transfer balance = discounted amount received from the other population
# minus the amount this player sent, rounded to 2 decimals.
ONE_P1_transfer_balance = round((sum(TWO_P1_transfer_discounted_storage) - sum(ONE_P1_transfer_storage)), 2)
ONE_P2_transfer_balance = round((sum(TWO_P2_transfer_discounted_storage) - sum(ONE_P2_transfer_storage)), 2)
TWO_P1_transfer_balance = round((sum(ONE_P1_transfer_discounted_storage) - sum(TWO_P1_transfer_storage)), 2)
TWO_P2_transfer_balance = round((sum(ONE_P2_transfer_discounted_storage) - sum(TWO_P2_transfer_storage)), 2)
print("")
# Cumulative summary: one row per player per population.
final_table = BeautifulTable()
final_table.column_headers = ["CUMULATIVE", "UTILITIES", "BALANCE", "REQS", "ALLOCATIONS", "AMOUNT"]
final_table.append_row(["ONE P1", round(sum(ONE_P1_EQ_utilities_storage), 2), ONE_P1_transfer_balance, ONE_P1_total_requests, ONE_P1_allocation_num, ONE_P1_allocated_amount])
final_table.append_row(["ONE P2", round(sum(ONE_P2_EQ_utilities_storage), 2), ONE_P2_transfer_balance, ONE_P2_total_requests, ONE_P2_allocation_num, ONE_P2_allocated_amount])
final_table.append_row(["TWO P1", round(sum(TWO_P1_EQ_utilities_storage), 2), TWO_P1_transfer_balance, TWO_P1_total_requests, TWO_P1_allocation_num, TWO_P1_allocated_amount])
final_table.append_row(["TWO P2", round(sum(TWO_P2_EQ_utilities_storage), 2), TWO_P2_transfer_balance, TWO_P2_total_requests, TWO_P2_allocation_num, TWO_P2_allocated_amount])
print(final_table)
# Mean broker discount rate over the whole game.
average_discount_rate = (sum(BROKER_rate_storage) / len(BROKER_rate_storage))
print("")
broker_final_table = BeautifulTable()
# NOTE(review): excerpt ends here — broker table rows are appended
# beyond this chunk.
def _print_matrix(self, M):
    """Render the matrix *M* (an iterable of rows) as a table on stdout."""
    rendered = BeautifulTable()
    for matrix_row in M:
        rendered.append_row(matrix_row)
    print(rendered)
def printTable(benchmarks, dataset_task_type):
    """Print a per-benchmark train/test quality comparison table.

    Columns compare the original model (index 0 in log_data) and the
    CGL/COMET model (index 1), with and without envelope post-processing;
    values are averaged over folds (currently a single fold).

    NOTE(review): reads the module-level globals `log_data` and
    `feature_data` populated elsewhere; `compare`, `mean` and
    `BeautifulTable` must also be in scope.
    """
    n_folds = 1
    global log_data
    global feature_data
    baseline_envelope = {}
    table = BeautifulTable()
    table.column_headers = [
        " ", "Original", "CGL", "Original Envelope", "COMET Envelope"
    ]
    for benchmark in benchmarks:
        baseline_envelope[benchmark] = {}
        for feature in feature_data[benchmark]:
            total_test = []
            total_train = []
            total_envelope_test = []
            total_envelope_train = []
            baseline_envelope[benchmark][feature] = []
            for fold in range(0, n_folds):
                # Pick the better envelope side (upper vs lower) for the
                # baseline model (index 0), depending on the task type.
                comp = compare(
                    dataset_task_type[benchmark], log_data[benchmark][feature]
                    [fold][0].test.upper.envelope_quality, log_data[benchmark]
                    [feature][fold][0].test.lower.envelope_quality)
                if comp == "upper":
                    total_test.append(log_data[benchmark][feature][fold]
                                      [0].test.upper.baseline_quality)
                    total_envelope_test.append(
                        log_data[benchmark][feature][fold]
                        [0].test.upper.envelope_quality)
                    total_train.append(log_data[benchmark][feature][fold]
                                       [0].train.upper.baseline_quality)
                    total_envelope_train.append(
                        log_data[benchmark][feature][fold]
                        [0].train.upper.envelope_quality)
                else:
                    total_test.append(log_data[benchmark][feature][fold]
                                      [0].test.lower.baseline_quality)
                    total_envelope_test.append(
                        log_data[benchmark][feature][fold]
                        [0].test.lower.envelope_quality)
                    total_train.append(log_data[benchmark][feature][fold]
                                       [0].train.lower.baseline_quality)
                    total_envelope_train.append(
                        log_data[benchmark][feature][fold]
                        [0].train.lower.envelope_quality)
            avg_test = str(round(mean(total_test), 2))
            avg_env_test = str(round(mean(total_envelope_test), 2))
            avg_train = str(round(mean(total_train), 2))
            avg_env_train = str(round(mean(total_envelope_train), 2))
            # Same comparison for the COMET model (index 1).
            # NOTE(review): `fold` is reused after the loop above — with
            # n_folds == 1 it is always 0; revisit before raising n_folds.
            comp = compare(
                dataset_task_type[benchmark], log_data[benchmark][feature]
                [fold][1].test.upper.envelope_quality, log_data[benchmark]
                [feature][fold][1].test.lower.envelope_quality)
            total_test = []
            total_train = []
            total_envelope_test = []
            total_envelope_train = []
            total_test.append(log_data[benchmark][feature][fold]
                              [1].test.upper.baseline_quality)
            total_train.append(log_data[benchmark][feature][fold]
                               [1].train.upper.baseline_quality)
            if comp == "upper":
                print(
                    "For best performance and monotonic predictions, use best_model.h5 with upper envelope"
                )
                total_envelope_test.append(
                    log_data[benchmark][feature][fold]
                    [1].test.upper.envelope_quality)
                total_envelope_train.append(
                    log_data[benchmark][feature][fold]
                    [1].train.upper.envelope_quality)
            else:
                print(
                    "For best performance and monotonic predictions, use best_model.h5 with lower envelope"
                )
                total_envelope_test.append(
                    log_data[benchmark][feature][fold]
                    [1].test.lower.envelope_quality)
                total_envelope_train.append(
                    log_data[benchmark][feature][fold]
                    [1].train.lower.envelope_quality)
            avg_comet_test = str(round(mean(total_test), 2))
            avg_comet_train = str(round(mean(total_train), 2))
            avg_comet_env_test = str(round(mean(total_envelope_test), 2))
            avg_comet_env_train = str(round(mean(total_envelope_train), 2))
            table.append_row([
                "Train", avg_train, avg_comet_train, avg_env_train,
                avg_comet_env_train
            ])
            table.append_row([
                "Test", avg_test, avg_comet_test, avg_env_test,
                avg_comet_env_test
            ])
    print(table)
def PlayerList(list):
    """Print a two-column (Name, Team) table for every player in *list*.

    Each element must expose getName() and getTeam().
    """
    roster_view = BeautifulTable()
    roster_view.column_headers = ["Name", "Team"]
    for player in list:
        roster_view.append_row([player.getName(), player.getTeam()])
    print(roster_view)
async def who_wins(self, ctx):
    """Compare recent war-log losses with the current opponent and announce
    the expected outcome of the ongoing FWA war.

    NOTE(review): relies on self.bot.coc (coc.py client) and the
    module-level ROYALS_TAG constant defined elsewhere.
    """
    currentWar = await self.bot.coc.get_clan_war(ROYALS_TAG)
    ourWarLog = []
    enemyWarLog = []
    ourLoseCnt = 0
    enemyLoseCnt = 0
    # NOTE(review): enemy_tag is never used — enemyTag below is.
    enemy_tag = ""
    # NOTE(review): "notinWar" looks like a typo for the API state
    # "notInWar" — confirm; unexpected states currently fall through
    # silently with no message.
    if currentWar.state == "notinWar":
        await ctx.send("Royals is not in war")
        return
    elif currentWar.state == "inWar" or currentWar.state == "preparation":
        enemyTag = currentWar.opponent.tag
        enemyInfo = await self.bot.coc.get_clan(enemyTag)
        # Only FWA clans follow the planned win/lose rotation this
        # prediction is based on.
        if "FWA" in enemyInfo.description:
            tempWarLogUs = await self.bot.coc.get_warlog(ROYALS_TAG)
            tempWarLogEnemy = await self.bot.coc.get_warlog(enemyTag)
            parentTable = BeautifulTable()
            warLogTableUs = BeautifulTable()
            warLogTableEnemy = BeautifulTable()
            parentTable.column_headers = [
                "Jesters Royals", currentWar.opponent.name
            ]
            # Us calculations: tally losses over the last 7 wars.
            i = 0
            while i < 7:
                ourWarLog.append(tempWarLogUs[i])
                i += 1
            for war in ourWarLog:
                warLogTableUs.append_row([war.result, war.opponent.name])
                if war.result == "lose":
                    ourLoseCnt += 1
            # Enemy calculations: same tally for the opponent.
            i = 0
            while i < 7:
                enemyWarLog.append(tempWarLogEnemy[i])
                i += 1
            for war in enemyWarLog:
                warLogTableEnemy.append_row(
                    [war.result, war.opponent.name])
                if war.result == "lose":
                    enemyLoseCnt += 1
            # Side-by-side war logs in one parent table.
            parentTable.append_row([warLogTableUs, warLogTableEnemy])
            await ctx.send(parentTable)
            # Display winner base on lose count
            if ourLoseCnt > enemyLoseCnt:
                await ctx.send(
                    "Congrats an FWA Clan!! Royals is winning their war with {0}"
                    .format(currentWar.opponent.name))
                return
            elif ourLoseCnt < enemyLoseCnt:
                await ctx.send(
                    "Congrats an FWA Clan!! Royals is losing their war with {0}"
                    .format(currentWar.opponent.name))
                return
            else:
                await ctx.send(
                    "Congrats an FWA Clan!! Your lose counts are the same")
                return
        else:
            await ctx.send("Sorry! Enemy Clan is not FWA")
            return
def print_results(metric_results):
    """Print evaluation metric results as console tables.

    Args:
        metric_results: mapping of metric name -> result object.
            NumericMetricResult values are printed in a simple
            Metric/Value table; NumericClassMetricResult values are
            printed as one row per metric with one column per class.
            Any other type triggers a warning and is skipped.
    """
    # name x value
    results_numeric = {}
    # {metric : {x classname : value}}
    class_results_numeric = {}
    for metric_name, metric_return in metric_results.items():
        if isinstance(metric_return, NumericMetricResult):
            # this value is a number
            results_numeric[metric_name] = float(metric_return())
        elif isinstance(metric_return, NumericClassMetricResult):
            # this return val is a dict classes x numeric value
            for class_name, value in metric_return().items():
                if metric_name not in class_results_numeric:
                    # this class was not added before -> add
                    class_results_numeric[metric_name] = {}
                # sanity check that we do not override something
                assert class_name not in class_results_numeric[metric_name]
                class_results_numeric[metric_name][class_name] = value
        else:
            warnings.warn(
                'The following metrics result is of an unprintable type and therefore ignored: {}'
                .format(metric_name))
    if results_numeric:
        # print the numeric results first
        numeric_table = BeautifulTable()
        numeric_table.header = ['Metric', 'Value']
        for metric_name, numeric_result in results_numeric.items():
            numeric_table.rows.append([metric_name, numeric_result])
        print(numeric_table)
    if class_results_numeric:
        numeric_per_class_table = BeautifulTable()
        header = None
        for metric_name in class_results_numeric:
            # sort the classes to get the same order for all rows
            classes = sorted(class_results_numeric[metric_name])
            if not header:
                # add the class names to header
                header = classes
            # assert that the same classes are used for all metrics
            assert header == classes
            row = [metric_name]
            # BUG FIX: iterate the *sorted* class list rather than raw
            # dict insertion order — the header built above is sorted, so
            # iterating the dict directly could place values under the
            # wrong class column.
            for class_id in classes:
                value = class_results_numeric[metric_name][class_id]()
                row.append(value)
            numeric_per_class_table.rows.append(row)
        header = list(map(str, header))
        numeric_per_class_table.header = ['Metric'] + header
        print(numeric_per_class_table)
def getRow(self, header):
    """Ask the user to fill a row and check if every user input is correct.
    Then ask the user is everything is okay or if he wants to modify something

    Note :
        - The checks are made after every input but not when asking to modify something >> critical
        - There could be more checks
        - NOTE(review): two statements in this method were corrupted by an
          automated secret-scrubber ("******" artifacts) and do not parse
          as written; they are preserved and flagged inline below.

    Args:
        header ([list]): header list of String for "header" row for BeautifulTables

    Returns:
        [list]: Returns a list with every user input in it
    """
    # Initialize the list that will collect the values for the query
    query_list = ["/////"]
    # Prepare the display tables
    table_vide = BeautifulTable(maxwidth=300)
    result_table = BeautifulTable(maxwidth=600)
    result_table.columns.header = header
    table_vide.columns.header = header
    table_vide.rows.append(
        ["", "", "", ""]
    )  # Add "invisible" values to the empty table so the user sees the row layout to fill in
    print(table_vide)
    try:
        for value in header:
            if value != "ID Utilisateur":
                if value == "Password":
                    # NOTE(review): corrupted statement — the scrubber
                    # replaced code with "******" and swallowed the
                    # else-branch; original intent was presumably
                    # `rep = getpass("Password : ")` / else `rep = input(...)`.
                    rep = getpass("Password : "******"### Veuillez entrer une valeur ###\n" + value + " : ")
                # Error handling for empty / bad input
                if rep == "":
                    while rep == "":
                        rep = input(
                            "### Veuillez entrer une valeur ###\n" + value + " : ")
                elif value == "Nom" or value == "Pseudonyme":
                    # Warn when the value would be truncated by the DB column.
                    if len(rep) > 20:
                        reponse = input(
                            "Attention, cette valeur est trop longue. Elle risque d'être tronquée dans la base de données.\nEntrer une nouvelle valeur ? Y/N : "
                        )
                        if reponse == "Y":
                            rep = input(
                                "### Veuillez entrer une valeur ###\n" + value + " : ")
                elif value == "Password":
                    check_password = self.strongPasswordChecker(rep)
                    while (check_password > 0):
                        print(
                            "Veuillez indiquer un mot de passe contenant une majuscule, une minuscule et un chiffre. 3 même lettres à la suite sont interdites. min : 6 max : 20"
                        )
                        # NOTE(review): corrupted by the same scrubbing —
                        # the re-prompt, the query_list appends and the
                        # `except` clause of the `try` above were lost here.
                        rep = getpass("Password : "******"+++ Erreur dans l'insertion de données, veuillez recommencer +++" )
    result_table.rows.append(query_list)
    print(result_table)
    # Confirmation loop: validate, correct a column, re-display, or abort.
    while (True):
        try:
            print(
                "\n\nVeuillez vérifier ces valeurs, sont-elles correctes ? Y/N \nou annuler et revenir au menu ? Q"
            )
            reponse = input("Votre réponse : ")
            if reponse == "Y":
                return query_list
            elif reponse == "N":
                while (True):
                    reponse = input(
                        "Indiquez le nom exact de la colonne dont la valeur n'est pas correcte. \nSi vous souhaitez ré-afficher les valeurs, entrez \"P\"\nSi vous souhaitez valider, entrez \"V\"\nSi vous souhaitez annuler et retour au menu, entrez \"A\".\n Votre réponse : "
                    )
                    if reponse == "A":
                        input(
                            "Retour au menu... Appuyer sur une touche pour continuer"
                        )
                        return None
                    elif reponse == "V":
                        return query_list
                    elif reponse == "P":
                        print(
                            result_table
                        )  # TODO: Implement a password-recovery method similar to the one above || split this block into several functions
                    elif reponse in header:
                        new_value = input("Entrez la valeur pour " + reponse + ": ")
                        # NOTE(review): this writes the new value into
                        # `header` (the column names) rather than into
                        # `query_list` — looks like a bug; confirm.
                        header[header.index(reponse)] = new_value
                        result_table.rows.append(query_list)
                    elif reponse == "Q":
                        print("### Retour au menu... ###")
                        return None
        except:
            print("+++ Erreur dans la validation des données +++")
def __init__(self, model_name: str):
    """Store the model name and set up empty monitor registries plus the
    report table (max width 200)."""
    self._monitors = {}
    self._new_monitors = {}
    self.model_name = model_name
    self.table = BeautifulTable(200)
    # NOTE(review): this if/else is the tail of a helper (likely something
    # like `def format_diff(diff):`) whose `def` line was lost when the
    # file was chunked — it does not parse at module level as-is.
    # Positive diffs render green with a leading '+', negative ones red.
    if diff >= 0:
        return colored('+' + "{0:.0%}".format(diff), 'green')
    else:
        return colored("{0:.0%}".format(diff), 'red')


# Compare two Google-Benchmark JSON result files given on the command line.
if(len(sys.argv) != 3):
    print("Usage: " + sys.argv[0] + " benchmark1.json benchmark2.json")
    exit()
with open(sys.argv[1]) as old_file:
    old_data = json.load(old_file)
with open(sys.argv[2]) as new_file:
    new_data = json.load(new_file)
# Borderless table, one row per benchmark.
table = BeautifulTable(default_alignment = BeautifulTable.ALIGN_LEFT)
table.column_headers = ["Benchmark", "prev. iter/s", "new iter/s", "change"]
table.row_separator_char = ''
table.top_border_char = ''
table.bottom_border_char = ''
table.left_border_char =''
table.right_border_char =''
average_diff_sum = 0.0
# Walk both result sets in lockstep; names must match pairwise.
for old, new in zip(old_data['benchmarks'], new_data['benchmarks']):
    if old['name'] != new['name']:
        print("Benchmark name mismatch")
        exit()
    # Relative throughput change: >0 means the new run is faster.
    diff = float(new['items_per_second']) / float(old['items_per_second']) - 1
    average_diff_sum += diff
    # NOTE(review): excerpt ends here — row appending and the average
    # report continue beyond this chunk.
# # You should have received a copy of the GNU General Public License # along with readit. If not, see <http://www.gnu.org/licenses/>. import sqlite3 # library of database used for project import datetime # used for getting current time and date from beautifultable import BeautifulTable # display output in table format import webbrowser # used to open url in browser import os # used to find home directory of user import csv # used to store bookmarks in CSV file from glob import glob # used to find path name from os.path import expanduser # used to perform operations on pathnames date = datetime.date.today() table = BeautifulTable() table_tag = BeautifulTable() table.left_border_char = '|' table.right_border_char = '|' table.top_border_char = '=' table.header_separator_char = '=' table.column_headers = ["ID", "URL", "TAG", "DATE", "TIME"] table_tag.left_border_char = '|' table_tag.right_border_char = '|' table_tag.top_border_char = '=' table_tag.header_separator_char = '=' table_tag.column_headers = ["Available TAGs"] class DatabaseConnection(object): """Class to perform database operations.
def main(experiment_dir, baseline_epochs=20, finetune_epochs=15, seed=None, batch_size=64, image_size=256, n_splits=None):
    """
    Main training loop: pretrain a Resnet34 on the Chest XRay Pneumonia
    dataset, then fine-tune copies of it on the COVID-19 Chest XRay dataset,
    either with k-fold cross validation or a single fixed split.

    Parameters
    ----------
    experiment_dir : str or Path
        Directory for logs and checkpoints (created if missing).
    baseline_epochs : int, optional
        Epochs for the pneumonia pretraining stage.
    finetune_epochs : int, optional
        Epochs for the COVID-19 fine-tuning stage.
    seed : int or None, optional
    batch_size : int, optional
    image_size : int, optional
    n_splits : int or None, optional
        If None, the model will be trained on all the available data.
        Default is None.
    """
    # Create experiment dir and checkpoints dir
    experiment_dir = Path(experiment_dir)
    experiment_dir.mkdir(exist_ok=True, parents=True)
    checkpoints_dir = experiment_dir / "checkpoints"
    checkpoints_dir.mkdir(exist_ok=True)

    # Set up root logger (file + stdout share the same format)
    logger_path = experiment_dir / "train.log"
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "[%(asctime)s] %(name)s:%(lineno)d %(levelname)s :: %(message)s")
    file_handler = logging.FileHandler(logger_path)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    # Get train logger
    logger = logging.getLogger("defeatcovid19.train")

    if seed is not None:
        # Fix seed to improve reproducibility
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    # Pretrain with Chest XRay Pneumonia dataset (>5k images)
    pneumonia_classifier = Resnet34()
    dataset = ChestXRayPneumoniaDataset(Path("/data/chest_xray_pneumonia"), image_size)
    dataset.build()
    # dataset = NIHCX38Dataset(Path('/data/nih-cx38'), size, balance=True)
    # dataset.build()
    train_idx, validation_idx = train_test_split(list(range(len(dataset))),
                                                 test_size=0.2,
                                                 stratify=dataset.labels)
    trainer = Trainer("baseline_classifier", pneumonia_classifier, dataset,
                      batch_size, train_idx, validation_idx, checkpoints_dir)
    trainer.run(max_epochs=baseline_epochs)

    # Fine tune with COVID-19 Chest XRay dataset (~120 images)
    dataset = COVIDChestXRayDataset(Path("/data/covid-chestxray-dataset"), image_size)
    dataset.build()

    if n_splits is not None:
        logger.info(f"Executing a {n_splits}-fold cross validation")
        kfold_metrics = {
            "train": {"loss": [], "roc": [], "accuracy": []},
            "val": {"loss": [], "roc": [], "accuracy": []},
        }
        split = 1
        skf = StratifiedKFold(n_splits=n_splits)
        for train_idx, validation_idx in skf.split(dataset.df, dataset.labels):
            logger.info("===Split #{}===".format(split))
            # Start from the pneumonia classifier
            classifier = copy.deepcopy(pneumonia_classifier)
            # Checkpoints per split
            checkpoints_dir_split = checkpoints_dir / f"split_{split}"
            checkpoints_dir_split.mkdir(exist_ok=True)
            trainer = Trainer("covid19_classifier", classifier, dataset,
                              batch_size, train_idx, validation_idx,
                              checkpoints_dir_split)
            trainer_metrics = trainer.run(max_epochs=finetune_epochs)
            # Record metrics for the current split
            for data_split_id, data_split_metrics in trainer_metrics.items():
                for metric_id, metric in data_split_metrics.items():
                    kfold_metrics[data_split_id][metric_id].append(metric)
            split += 1

        # Summarize metrics from all splits and compute mean and std
        table = BeautifulTable()
        table.column_headers = (
            ["Metric name"]
            + [f"Split {split_num+1}" for split_num in range(n_splits)]
            + ["Mean", "Std"])
        for data_split_id, data_split_metrics in kfold_metrics.items():
            for metric_id, metric in data_split_metrics.items():
                metric_vals = kfold_metrics[data_split_id][metric_id]
                mean_metric = np.mean(metric_vals)
                std_metric = np.std(metric_vals)
                table_row = ([f"{data_split_id} {metric_id}"]
                             + metric_vals + [mean_metric, std_metric])
                table.append_row(table_row)
        logger.info(f"SUMMARY\n{table}")
    else:
        logger.info("Training with a fixed split")
        # Train / test split for covid data
        train_idx, validation_idx = train_test_split(list(range(len(dataset))),
                                                     test_size=0.2,
                                                     stratify=dataset.labels)
        # Start from the pneumonia classifier
        classifier = copy.deepcopy(pneumonia_classifier)
        trainer = Trainer("covid19_classifier", classifier, dataset,
                          batch_size, train_idx, validation_idx, checkpoints_dir)
        # BUG FIX: this used to hardcode max_epochs=15, silently ignoring the
        # finetune_epochs parameter that the k-fold branch honors.
        trainer_metrics = trainer.run(max_epochs=finetune_epochs)

        # Summarize metrics from training
        table = BeautifulTable()
        table.column_headers = ["Metric name", "Metric value"]
        for data_split_id, data_split_metrics in trainer_metrics.items():
            for metric_id, metric in data_split_metrics.items():
                table_row = [f"{data_split_id} {metric_id}", metric]
                table.append_row(table_row)
        logger.info(f"SUMMARY\n{table}")
def setup(self):
    """Register mocked challenge/team endpoints and pre-render the table."""
    challenge_data = json.loads(challenge_response.challenges)
    host_team_data = json.loads(challenge_response.challenge_host_teams)
    participant_team_data = json.loads(
        challenge_response.challenge_participant_teams)
    url = "{}{}"
    # (endpoint, payload) pairs for every GET the test will make
    mocked_endpoints = [
        (url.format(API_HOST_URL, URLS.participant_teams.value),
         participant_team_data),
        (url.format(API_HOST_URL, URLS.host_teams.value),
         host_team_data),
        (url.format(API_HOST_URL, URLS.participant_challenges.value).format("3"),
         challenge_data),
        (url.format(API_HOST_URL, URLS.host_challenges.value).format("2"),
         challenge_data),
    ]
    for endpoint, payload in mocked_endpoints:
        responses.add(responses.GET, endpoint, json=payload, status=200)

    challenges_json = challenge_data["results"]
    table = BeautifulTable(max_width=200)
    attributes = ["id", "title", "short_description"]
    columns_attributes = [
        "ID",
        "Title",
        "Short Description",
        "Creator",
        "Start Date",
        "End Date",
    ]
    table.column_headers = columns_attributes
    # one color per column, in header order
    cell_colors = ('white', 'yellow', 'cyan', 'white', 'green', 'red')
    for challenge_data in reversed(challenges_json):
        values = [challenge_data[item] for item in attributes]
        values.append(challenge_data["creator"]["team_name"])
        values.append(convert_UTC_date_to_local(challenge_data["start_date"]))
        values.append(convert_UTC_date_to_local(challenge_data["end_date"]))
        table.append_row(
            [colored(cell, color) for cell, color in zip(values, cell_colors)])
    self.output = str(table)
def be_table(result, keys):
    """Print *result* as a BeautifulTable, with *keys* as the first row."""
    rendered = BeautifulTable()
    # The header row and the data rows are appended the same way.
    for record in [keys, *result]:
        rendered.append_row(record)
    print(rendered)
def predict_matrix(x1, x0, X_test, y_test, k):
    """
    Classify X_test with two per-class Gaussian mixture models.

    Runs EM 10 times on the class-0 (x0) and class-1 (x1) training data,
    keeps the run with the highest final objective for each class, and then
    labels each test point with the class whose prior-weighted mixture
    density is larger.

    NOTE(review): reads the module-level X_train for the class priors —
    confirm it matches the data x0/x1 were drawn from.

    Parameters
    ----------
    x1, x0 : ndarray
        Training samples of class 1 and class 0.
    X_test, y_test : ndarray
        Test samples and their ground-truth labels (0/1).
    k : int
        Number of mixture components per class.

    Returns
    -------
    (BeautifulTable, float)
        Confusion-matrix table and the accuracy on X_test.
    """
    obj_1, obj_0 = [], []
    pi_1, mean_1, cov_1 = [], [], []
    pi_0, mean_0, cov_0 = [], [], []
    for run in range(10):
        (pi1_temp, mean1_temp, cov1_temp, obj1_temp,
         pi0_temp, mean0_temp, cov0_temp, obj0_temp) = EM(x0, x1, k)
        obj_1.append(obj1_temp[-1])
        obj_0.append(obj0_temp[-1])
        pi_1.append(pi1_temp)
        pi_0.append(pi0_temp)
        mean_1.append(mean1_temp)
        mean_0.append(mean0_temp)
        cov_1.append(cov1_temp)
        cov_0.append(cov0_temp)

    # Best run per class = highest final EM objective.
    idx1 = np.argmax(np.array(obj_1))
    idx0 = np.argmax(np.array(obj_0))
    # BUG FIX: class-1 weights were taken from pi_0 (pi_0[idx1]) instead of pi_1.
    pi1_best = pi_1[idx1]
    pi0_best = pi_0[idx0]
    mean1_best = mean_1[idx1]
    mean0_best = mean_0[idx0]
    cov1_best = cov_1[idx1]
    cov0_best = cov_0[idx0]

    # Mixture densities per test point (the original misnamed these "cdf";
    # st.multivariate_normal.pdf returns densities).
    density1 = 0
    density0 = 0
    for i in range(k):
        density1 += pi1_best[i] * st.multivariate_normal.pdf(
            X_test, mean1_best[i], cov1_best[i], allow_singular=True)
        density0 += pi0_best[i] * st.multivariate_normal.pdf(
            X_test, mean0_best[i], cov0_best[i], allow_singular=True)

    # Class priors from the training-set proportions.
    p0 = x0.shape[0] / X_train.shape[0]
    p1 = x1.shape[0] / X_train.shape[0]
    probability1 = density1 * p1
    probability0 = density0 * p0
    # Ties go to class 1, as in the original (>=).
    label = [1 if probability1[i] >= probability0[i] else 0
             for i in range(X_test.shape[0])]

    # Confusion counts. NAMING FIX: the original swapped FP and FN
    # (it counted truth=1/pred=0 as "FP"); the table's numeric layout was
    # already correct and is unchanged here.
    TP = FP = TN = FN = 0
    for i in range(len(label)):
        if y_test[i] == 1:
            if label[i] == 1:
                TP += 1
            else:
                FN += 1
        else:
            if label[i] == 1:
                FP += 1
            else:
                TN += 1

    table = BeautifulTable()
    table.column_headers = ["Ground Truth/Prediction", "1", "0"]
    table.append_row(["1", TP, FN])
    table.append_row(["0", FP, TN])
    accuracy = (TP + TN) / (TP + TN + FN + FP)
    return table, accuracy
# Copyright (c) 2018 Ruslan Variushkin, [email protected] from beautifultable import BeautifulTable from .ispmanagerclass import * table = BeautifulTable(max_width=300) def print_data(domains, names): table.column_headers = names for domain in domains: data = [] for key in names: try: local_data = domain[key] except: domain[key] = "None" local_data = domain[key] data.append(local_data) table.column_alignments[key] = BeautifulTable.ALIGN_LEFT table.append_row([x for x in data]) print(table) def load_data(names, query, *args): data = list_data(names, *args) return print_data(data.list(query), names) def load_db_data(names, query, *args): data = list_data(names, *args) head = ["owner", "db_name", "db_user"]
def showIndexTable(self, event):
    # function to get category and display onto the GUI
    #
    # Looks up the category chosen in the combo box, fetches all of its rows
    # from the database, renders them into a BeautifulTable shown in a Text
    # widget, and wires up "edit" and "delete" entry boxes + buttons.
    global partsColumnHeaders, brainsColumnHeaders, gearsColumnHeaders, motorsColumnHeaders, wheelsColumnHeaders, othersColumnHeaders
    columnHeadersList = [partsColumnHeaders, brainsColumnHeaders, gearsColumnHeaders, motorsColumnHeaders, wheelsColumnHeaders, othersColumnHeaders]
    tableList = ('Parts', 'Brains', 'Gears', 'Motors', 'Wheels', 'Others')
    table = self.comboBox.get()
    results = dbSearchAll(table)
    tablePrint = BeautifulTable()  # creating the Beautiful Table for printing
    tablePrint.set_style(BeautifulTable.STYLE_COMPACT)
    i = 0
    # Pick the header set matching the selected category.
    # NOTE(review): the else-branch `i = i + 1` is dead code — the for
    # statement already advances i each iteration.
    for i in range(len(tableList)):
        if table == tableList[i]:
            columnHeaders = columnHeadersList[i]
            break
        else:
            i = i + 1
    tablePrint.column_headers = columnHeaders
    tablePrint.set_padding_widths(1)
    # Copy every DB row into the printable table.
    for row in results:
        memberList = []
        for member in row:
            memberList.append(member)
        tablePrint.append_row(memberList)
    scrollBar = Scrollbar(self)
    scrollBar.pack(side=RIGHT, fill=Y)
    resultsText = Text(self, height=10, width=75)
    resultsText.place(x=250, y=500)
    scrollBar.config(command=resultsText.yview)
    resultsText.insert(END, tablePrint)  # inserting data into text widget
    editSearchEntry = Entry(root, width=20)
    editSearchEntry.place(x=450, y=325)  # placing an entry box for user to edit an item from the displayed index
    indexText = Label(root, text='Choose index number to edit : ')
    indexText.place(x=450, y=300)

    def clickedSearch():
        # Edit-button callback: hand the chosen index to editItem.
        indexNumber = editSearchEntry.get()
        self.editItem(indexNumber, table, tablePrint, resultsText)

    clickedButton = Button(root, text='Search', command=clickedSearch)
    clickedButton.place(x=590, y=325)
    print(tablePrint)
    deleteItemSearchEntry = Entry(root, width=20)
    deleteItemSearchEntry.place(x=700, y=325)  # placing an entry box for user to remove an item form the displayed index
    deleteText = Label(root, text='Choose index number to remove : ')
    deleteText.place(x=700, y=300)

    def clickedDelete():
        # item removal function called when user clicks delete button:
        # deletes the row, then rebuilds and re-displays the table.
        indexNumber = deleteItemSearchEntry.get()
        dbRemoveItem(indexNumber, table)
        tablePrint = BeautifulTable()
        tablePrint.set_style(BeautifulTable.STYLE_COMPACT)
        results = dbSearchAll(table)
        tablePrint.column_headers = columnHeaders
        tablePrint.set_padding_widths(1)
        for row in results:
            memberList = []
            for member in row:
                memberList.append(member)
            tablePrint.append_row(memberList)
        resultsText.delete('1.0', END)
        resultsText.insert(END, tablePrint)
        resultsText.update()

    deleteButton = Button(root, text='Delete', command=clickedDelete)
    deleteButton.place(x=840, y=325)
def tax_comparison(taxable_income, member, child, UPB, rate, efficient_state_rate, local_tax, joint=True, existing_mtg=False, display=True, detail=False):
    """Compare a household's tax liability under the old and new tax rules.

    Computes standard, itemized, and (old-law only) AMT liabilities, taking
    the binding amount for each regime, and optionally prints a breakdown.

    Parameters
    ----------
    taxable_income : income before deductions
    member : number of family members (personal exemption)
    child : number of children (child care credit)
    UPB : mortgage unpaid principal balance
    rate : mortgage interest rate (fraction)
    efficient_state_rate : effective state/city tax rate (fraction)
    local_tax : local tax amount
    joint : filing jointly if True
    existing_mtg : mortgage predates the new rules if True
    display, detail : print summary / full breakdown

    Returns
    -------
    list : [tax_old, tax_new, old_tax_standard, new_tax_standard,
            old_tax_itemized, new_tax_itemized, old_tax_AMT]
    """
    # Personal exemption (applied to both standard and itemized)
    old_PersonalExemption_deduction = PersonalExemption_deduction_old(
        taxable_income, member, joint=joint)
    # Child care tax credit (applied to both standard and itemized)
    old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child, joint=joint)
    new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child, joint=joint)
    # Mortgage Interest Rate deduction (applied to itemized and AMT)
    old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)
    new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=existing_mtg)
    # State and local tax (applied to itemized only)
    old_SALT_deduction = SALT_deduction_old(taxable_income, efficient_state_rate, local_tax)
    new_SALT_deduction = SALT_deduction_new(taxable_income, efficient_state_rate, local_tax)
    # calculate standard tax — fixed statutory standard-deduction amounts
    if joint:
        old_standard_deduction = 12600
        new_standard_deduction = 24000
    else:
        old_standard_deduction = 6300
        new_standard_deduction = 12000
    # tax before Child care credit (new law has no personal exemption)
    old_tax_beforeCCTC_standard = old_bracket(taxable_income - old_standard_deduction - old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_standard = new_bracket(taxable_income - new_standard_deduction, joint=joint)
    # tax before Child after credit (floored at zero — credit is non-refundable)
    old_tax_standard = max(0, old_tax_beforeCCTC_standard - old_ChildCare_Credit)
    new_tax_standard = max(0, new_tax_beforeCCTC_standard - new_ChildCare_Credit)
    # calculate itemized tax
    # tax before Child care credit
    old_tax_beforeCCTC_itemized = old_bracket(
        taxable_income - old_MTG_IR_deduction - old_SALT_deduction - old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_itemized = new_bracket(
        taxable_income - new_MTG_IR_deduction - new_SALT_deduction, joint=joint)
    # tax before Child after credit
    old_tax_itemized = max(0, old_tax_beforeCCTC_itemized - old_ChildCare_Credit)
    new_tax_itemized = max(0, new_tax_beforeCCTC_itemized - new_ChildCare_Credit)
    # calculate AMT tax (old law only; no AMT leg computed for the new law)
    AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)
    # tax before Child care credit
    old_tax_beforeCCTC_AMT = AMT_bracket(
        taxable_income - AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)
    # tax before Child after credit
    old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
    # Old law: cheaper of standard/itemized, but never below AMT.
    tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)
    tax_new = min(new_tax_standard, new_tax_itemized)
    if display:
        print("Current Tax Should Pay: $%3.2f" % tax_old)
        print(" Standard: $%3.2f" % old_tax_standard)
        print(" Itemized: $%3.2f" % old_tax_itemized)
        print(" AMT tax: $%3.2f" % old_tax_AMT)
        print("New Tax Should Pay: $%3.2f" % tax_new)
        print(" Standard: $%3.2f" % new_tax_standard)
        print(" Itemized: $%3.2f" % new_tax_itemized)
    if detail:
        print("***********************************************")
        print("${:,} taxable income".format(taxable_income) + ', joint = %r' % joint)
        print("%d Family Member, %d child(ren)" % (member, child))
        print(
            'Existing Mortgage: %r' % existing_mtg + ', ${:,} Mortgage Balance'.format(UPB) + ', %3.2f%% Interest Rate' % (rate * 100),
        )
        print(
            '${:,} Local Tax'.format(local_tax) + ', %d%% State/City Tax Rate' % (efficient_state_rate * 100),
        )
        print("***********************************************")
        table = BeautifulTable()
        table.column_headers = ["Item", "Current", "New"]
        table.append_row([
            "Standard Deduction", old_standard_deduction, new_standard_deduction
        ])
        table.append_row(
            ["Personal Exemption", old_PersonalExemption_deduction, 'NA'])
        table.append_row([
            "Child Care Tax Credit", old_ChildCare_Credit, new_ChildCare_Credit
        ])
        table.append_row([
            "Mortgage Interest Deduction", old_MTG_IR_deduction, new_MTG_IR_deduction
        ])
        table.append_row([
            "State and Local Tax Deduction", old_SALT_deduction, new_SALT_deduction
        ])
        table.append_row([
            "AMT Exemption (not including MTG Interest)", AMT_exemption_amount, "NA"
        ])
        table.append_row(["Tax", tax_old, tax_new])
        print(table)
    return [
        tax_old, tax_new, old_tax_standard, new_tax_standard,
        old_tax_itemized, new_tax_itemized, old_tax_AMT
    ]
def insertData(self, event):
    # function to insert data into the table
    #
    # Shows the current contents of the selected category, then prompts the
    # user field-by-field (blocking on each Enter click via wait_variable),
    # inserts the collected values as a new row, and refreshes the display.
    table = self.comboBox.get()
    global partsColumnHeaders, brainsColumnHeaders, gearsColumnHeaders, motorsColumnHeaders, wheelsColumnHeaders, othersColumnHeaders
    columnHeadersList = [partsColumnHeaders, brainsColumnHeaders, gearsColumnHeaders, motorsColumnHeaders, wheelsColumnHeaders, othersColumnHeaders]
    global partValues, brainValues, gearValues, motorValues, wheelValues, otherValues
    itemValuesList = [partValues, brainValues, gearValues, motorValues, wheelValues, otherValues]
    tableList = ('Parts', 'Brains', 'Gears', 'Motors', 'Wheels', 'Others')
    # NOTE(review): duplicate of the assignment above — harmless but redundant.
    table = self.comboBox.get()
    results = dbSearchAll(table)
    tablePrint = BeautifulTable()
    tablePrint.set_style(BeautifulTable.STYLE_COMPACT)
    i = 0
    # Pick headers and field names for the selected category.
    # NOTE(review): the else-branch `i = i + 1` is dead code — the for
    # statement already advances i.
    for i in range(len(tableList)):
        if table == tableList[i]:
            itemValues = itemValuesList[i]
            columnHeaders = columnHeadersList[i]
            break
        else:
            i = i + 1
    tablePrint.column_headers = columnHeaders
    tablePrint.set_padding_widths(1)
    for row in results:
        memberList = []
        for member in row:
            memberList.append(member)
        tablePrint.append_row(memberList)
    scrollBar = Scrollbar(self)
    scrollBar.pack(side=RIGHT, fill=Y)
    resultsText = Text(self, height=10, width=75)
    resultsText.place(x=250, y=500)
    scrollBar.config(command=resultsText.yview)
    resultsText.insert(END, tablePrint)
    data = []  # values collected from the user, one per field
    var = IntVar()  # signalled by the Enter button to unblock wait_variable

    def clicked():
        # Enter-button callback: record the current entry-box value.
        # NOTE(review): valueEntryBox is resolved late from the enclosing
        # loop, so this reads whichever entry box was created last.
        value = valueEntryBox.get()
        data.append(value)
        var.set(1)

    i = 0
    # One prompt per field; wait_variable blocks until Enter is clicked.
    for i in range(len(itemValues)):
        valueForEditText = Label(self, text='Input value for : ')
        valueForEditText.place(x=250, y=350)
        valueForEdit = Text(self, height=1, width=20)
        valueForEdit.insert(END, itemValues[i])
        valueForEdit.place(x=350, y=350)
        valueEntryBox = Entry(root, width=20)
        valueEntryBox.place(x=250, y=375)
        confirmValueButton = Button(self, text='Enter')
        confirmValueButton.config(command=clicked)
        confirmValueButton.place(x=400, y=375)
        confirmValueButton.wait_variable(var)
        i = i + 1
    dbInsertItem(data, table)
    # Refresh the displayed table with the newly inserted row.
    results = dbSearchAll(table)
    tablePrint = BeautifulTable()
    tablePrint.set_style(BeautifulTable.STYLE_COMPACT)
    tablePrint.column_headers = columnHeaders
    tablePrint.set_padding_widths(1)
    for row in results:
        memberList = []
        for member in row:
            memberList.append(member)
        tablePrint.append_row(memberList)
    resultsText.delete('1.0', END)
    resultsText.insert(END, tablePrint)
    resultsText.update()
def train_test():
    """Cross-validated train/eval loop over every selected model.

    For each model index in model_select: builds per-fold train/test
    datasets, trains with early stopping on dev accuracy, evaluates the
    best checkpoint, and writes per-fold and global CSV reports plus a
    log file under Report_<model name>/.

    NOTE(review): relies on many module-level names (fieldnames, cwd,
    nfold, trainfolds, testfolds, use_cuda, batch_size, lr, maxepoch,
    maxpatience, doTrain, doEval, accs, times, ... ) — confirm they are
    defined before calling.
    """
    for k in model_select:
        table = BeautifulTable()
        avgtable = BeautifulTable()
        fieldnames1 = [model_lst[k], 'Avg', 'Std_dev']  # column names report GLOBAL CSV
        folder = os.path.join(cwd, 'Report_' + str(model_lst[k]))
        if not os.path.exists(folder):
            os.mkdir(folder)
        logfilepath = os.path.join(folder, 'log.txt')
        logfile = open(logfilepath, "w")
        with open(os.path.join(folder, 'Report_folds.csv'), 'w') as f_fold, open(os.path.join(folder, 'Report_global.csv'), 'w') as f_global:
            writer = csv.DictWriter(f_fold, fieldnames=fieldnames)
            writer1 = csv.DictWriter(f_global, fieldnames=fieldnames1)
            writer.writeheader()
            writer1.writeheader()
            t0 = 0
            t1 = 0
            for i in range(1, nfold + 1):
                t0 = time.time()
                setSeeds(0)

                # Per-fold dataset wrappers (capture fold index i from the loop).
                class Traindataset(Dataset):
                    def __init__(self):
                        self.data = trainfolds[i - 1]
                        self.x_data = torch.from_numpy(np.asarray(self.data.iloc[:, 0:-1]))
                        self.len = self.data.shape[0]
                        self.y_data = torch.from_numpy(np.asarray(self.data.iloc[:, [-1]]))
                        if (use_cuda):
                            self.x_data = self.x_data.cuda()
                            self.y_data = self.y_data.cuda()

                    def __getitem__(self, index):
                        return self.x_data[index], self.y_data[index]

                    def __len__(self):
                        return self.len

                class Testdataset(Dataset):
                    def __init__(self):
                        self.data = testfolds[i - 1]
                        self.x_data = torch.from_numpy(np.asarray(self.data.iloc[:, 0:-1]))
                        self.len = self.data.shape[0]
                        self.y_data = torch.from_numpy(np.asarray(self.data.iloc[:, [-1]]))
                        if (use_cuda):
                            self.x_data = self.x_data.cuda()
                            self.y_data = self.y_data.cuda()

                    def __getitem__(self, index):
                        return self.x_data[index], self.y_data[index]

                    def __len__(self):
                        return self.len

                traindataset = Traindataset()
                testdataset = Testdataset()
                header(model_lst, k, i, traindataset, testdataset)
                #train_sampler,dev_sampler,test_sampler=dev_shuffle(shuffle_train,shuffle_test,val_split,traindataset,testdataset)
                train_sampler, dev_sampler, test_val_sampler, test_sampler = data_split(shuffle_train, shuffle_test, val_split, test_val_split, traindataset, testdataset)
                #loaders
                train_loader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, sampler=train_sampler, drop_last=True)
                test_val_loader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, sampler=test_val_sampler, drop_last=True)
                dev_loader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, sampler=dev_sampler, drop_last=True)
                test_loader = torch.utils.data.DataLoader(testdataset, batch_size=batch_size, sampler=test_sampler, drop_last=True)
                # Instantiate ModelK by name. NOTE(review): eval() on a
                # constructed name — safe only because k comes from
                # model_select; consider a dict of classes instead.
                modelClass = "Model" + str(k)
                model = eval(modelClass)()
                if (use_cuda):
                    model = model.cuda()
                if doTrain:
                    # NOTE(review): size_average is deprecated in modern
                    # torch — reduction='mean' is the equivalent; confirm
                    # the torch version in use.
                    criterion = nn.BCELoss(size_average=True)
                    optimizer = torch.optim.SGD(model.parameters(), lr)
                    msg = 'Accuracy on test set before training: ' + str(accuracy(test_loader, model)) + '\n'
                    print(msg)
                    logfile.write(msg + "\n")
                    #EARLY STOP
                    epoch = 0
                    patience = 0
                    best_acc_dev = 0
                    while (epoch < maxepoch and patience < maxpatience):
                        running_loss = 0.0
                        for l, data in enumerate(train_loader, 0):
                            inputs, labels = data
                            if use_cuda:
                                inputs, labels = inputs.cuda(), labels.cuda()
                            inputs, labels = Variable(inputs), Variable(labels)
                            y_pred = model(inputs)
                            if use_cuda:
                                y_pred = y_pred.cuda()
                            loss = criterion(y_pred, labels)
                            optimizer.zero_grad()
                            loss.backward()
                            optimizer.step()
                            running_loss += loss.item()
                            #print accuracy ever l mini-batches
                            # NOTE(review): divides by 999 although the
                            # window is 2000 batches — reported loss is ~2x
                            # the true mean; kept as-is.
                            if l % 2000 == 1999:
                                msg = '[%d, %5d] loss: %.3f' % (epoch + 1, l + 1, running_loss / 999)
                                print(msg)
                                logfile.write(msg + "\n")
                                running_loss = 0.0
                        #msg = 'Accuracy on dev set:' + str(accuracy(dev_loader))
                        #print(msg)
                        #logfile.write(msg + "\n")
                        accdev = (accuracy(dev_loader, model))
                        msg = 'Accuracy on dev set:' + str(accdev)
                        print(msg)
                        logfile.write(msg + "\n")
                        is_best = bool(accdev > best_acc_dev)
                        best_acc_dev = (max(accdev, best_acc_dev))
                        save_checkpoint({
                            'epoch': epoch + 1,
                            'state_dict': model.state_dict(),
                            'best_acc_dev': best_acc_dev
                        }, is_best, os.path.join(folder, 'F' + str(i) + 'best.pth.tar'), logfile)
                        if is_best:
                            patience = 0
                        else:
                            patience = patience + 1
                        epoch = epoch + 1
                        logfile.flush()
                if doEval:
                    # Reload the best checkpoint for this fold.
                    if use_cuda:
                        state = torch.load(os.path.join(folder, 'F' + str(i) + 'best.pth.tar'))
                    else:
                        state = torch.load(os.path.join(folder, 'F' + str(i) + 'best.pth.tar'), map_location=lambda storage, loc: storage)
                    stop_epoch = state['epoch']
                    model.load_state_dict(state['state_dict'])
                    if not use_cuda:
                        model.cpu()
                    accuracy_dev = state['best_acc_dev']
                    model.eval()
                    acctest = (accuracy(test_loader, model))
                    acctest_val = (accuracy(test_val_loader, model))
                    accs[i - 1] = acctest
                    accs_test_val[i - 1] = acctest_val
                    precision_0_U, recall_0_U, f1_score_0_U = pre_rec(test_loader, model, 0.0)
                    precisions_0_U[i - 1] = precision_0_U
                    recalls_0_U[i - 1] = recall_0_U
                    f1_scores_0_U[i - 1] = f1_score_0_U
                    precision_1_U, recall_1_U, f1_score_1_U = pre_rec(test_loader, model, 1.0)
                    precisions_1_U[i - 1] = precision_1_U
                    recalls_1_U[i - 1] = recall_1_U
                    f1_scores_1_U[i - 1] = f1_score_1_U
                    precision_0_L, recall_0_L, f1_score_0_L = pre_rec(test_val_loader, model, 0.0)
                    precisions_0_L[i - 1] = precision_0_L
                    recalls_0_L[i - 1] = recall_0_L
                    f1_scores_0_L[i - 1] = f1_score_0_L
                    precision_1_L, recall_1_L, f1_score_1_L = pre_rec(test_val_loader, model, 1.0)
                    precisions_1_L[i - 1] = precision_1_L
                    recalls_1_L[i - 1] = recall_1_L
                    f1_scores_1_L[i - 1] = f1_score_1_L
                    accs_dev[i - 1] = accuracy_dev
                    writer.writerow({'Fold': i, 'Acc_L': acctest_val, 'Acc_U': acctest,
                                     #'P_0_U': precision_0_U,'R_0_U': recall_0_U,'F1_0_U': f1_score_0_U,
                                     'R_0_U': recall_0_U,
                                     #'P_1_U': precision_1_U,'R_1_U': recall_1_U,'F1_1_U': f1_score_1_U,
                                     'R_1_U': recall_1_U,
                                     #'P_0_L': precision_0_L,'R_0_L': recall_0_L,'F1_0_L': f1_score_0_L,
                                     'R_0_L': recall_0_L,
                                     #'P_1_L': precision_1_L,'R_1_L': recall_1_L,'F1_1_L': f1_score_1_L,
                                     'R_1_L': recall_1_L,
                                     'Stop_epoch': stop_epoch, 'Accuracy_dev': accuracy_dev})
                    table.column_headers = fieldnames
                    table.append_row([i, acctest_val, acctest,
                                      #precision_0_U,recall_0_U,f1_score_0_U,
                                      recall_0_U,
                                      #precision_1_U,recall_1_U,f1_score_1_U,
                                      recall_1_U,
                                      #precision_0_L,recall_0_L,f1_score_0_L,
                                      recall_0_L,
                                      #precision_1_L,recall_1_L,f1_score_1_L,
                                      recall_1_L,
                                      stop_epoch, accuracy_dev])
                    print(table)
                    print('----------------------------------------------------------------------')
                    logfile.write(str(table) + "\n----------------------------------------------------------------------\n")
                t1 = time.time()
                times[i - 1] = int(t1 - t0)
            # --- global summary across all folds ---
            duration = str(datetime.timedelta(seconds=np.sum(times)))
            writer.writerow({})
            writer.writerow({'Fold': 'Elapsed time: ' + duration})
            avg_acc_test_val = round(np.average(accs_test_val), 3)
            std_acc_test_val = round(np.std(accs_test_val), 3)
            avg_acc_test_val, avg_a, avg_p_0_U, avg_r_0_U, avg_f_0_U, avg_p_1_U, avg_r_1_U, avg_f_1_U, avg_p_0_L, avg_r_0_L, avg_f_0_L, avg_p_1_L, avg_r_1_L, avg_f_1_L, avg_a_d = averages([accs_test_val, accs, precisions_0_U, recalls_0_U, f1_scores_0_U, precisions_1_U, recalls_1_U, f1_scores_1_U, precisions_0_L, recalls_0_L, f1_scores_0_L, precisions_1_L, recalls_1_L, f1_scores_1_L, accs_dev])
            std_acc_test_val, std_a, std_p_0_U, std_r_0_U, std_f_0_U, std_p_1_U, std_r_1_U, std_f_1_U, std_p_0_L, std_r_0_L, std_f_0_L, std_p_1_L, std_r_1_L, std_f_1_L, std_a_d = stds([accs_test_val, accs, precisions_0_U, recalls_0_U, f1_scores_0_U, precisions_1_U, recalls_1_U, f1_scores_1_U, precisions_0_L, recalls_0_L, f1_scores_0_L, precisions_1_L, recalls_1_L, f1_scores_1_L, accs_dev])
            # BUG FIX: the Acc_U / Acc_L rows used to swap their Std_dev
            # values (Acc_U got std_acc_test_val and Acc_L got std_a),
            # contradicting avgtable below; each row now pairs the avg and
            # std of the same series.
            writer1.writerow({model_lst[k]: 'Acc_U', 'Avg': avg_a, 'Std_dev': std_a})
            writer1.writerow({model_lst[k]: 'Acc_L', 'Avg': avg_acc_test_val, 'Std_dev': std_acc_test_val})
            writer1.writerow({model_lst[k]: 'P_0_U', 'Avg': avg_p_0_U, 'Std_dev': std_p_0_U})
            writer1.writerow({model_lst[k]: 'R_0_U', 'Avg': avg_r_0_U, 'Std_dev': std_r_0_U})
            writer1.writerow({model_lst[k]: 'F1_0_U', 'Avg': avg_f_0_U, 'Std_dev': std_f_0_U})
            writer1.writerow({model_lst[k]: 'P_1_U', 'Avg': avg_p_1_U, 'Std_dev': std_p_1_U})
            writer1.writerow({model_lst[k]: 'R_1_U', 'Avg': avg_r_1_U, 'Std_dev': std_r_1_U})
            writer1.writerow({model_lst[k]: 'F1_1_U', 'Avg': avg_f_1_U, 'Std_dev': std_f_1_U})
            writer1.writerow({model_lst[k]: 'P_0_L', 'Avg': avg_p_0_L, 'Std_dev': std_p_0_L})
            writer1.writerow({model_lst[k]: 'R_0_L', 'Avg': avg_r_0_L, 'Std_dev': std_r_0_L})
            writer1.writerow({model_lst[k]: 'F1_0_L', 'Avg': avg_f_0_L, 'Std_dev': std_f_0_L})
            writer1.writerow({model_lst[k]: 'P_1_L', 'Avg': avg_p_1_L, 'Std_dev': std_p_1_L})
            writer1.writerow({model_lst[k]: 'R_1_L', 'Avg': avg_r_1_L, 'Std_dev': std_r_1_L})
            writer1.writerow({model_lst[k]: 'F1_1_L', 'Avg': avg_f_1_L, 'Std_dev': std_f_1_L})
            writer1.writerow({model_lst[k]: 'Acc_dev', 'Avg': avg_a_d, 'Std_dev': std_a_d})
            writer1.writerow({})
            writer1.writerow({model_lst[k]: 'Elapsed time: ' + duration})
            avgtable.column_headers = fieldnames1
            avgtable.append_row(['Acc_U', avg_a, std_a])
            avgtable.append_row(['Acc_L', avg_acc_test_val, std_acc_test_val])
            avgtable.append_row(['P_0_U', avg_p_0_U, std_p_0_U])
            avgtable.append_row(['R_0_U', avg_r_0_U, std_r_0_U])
            avgtable.append_row(['F1_0_U', avg_f_0_U, std_f_0_U])
            avgtable.append_row(['P_1_U', avg_p_1_U, std_p_1_U])
            avgtable.append_row(['R_1_U', avg_r_1_U, std_r_1_U])
            avgtable.append_row(['F1_1_U', avg_f_1_U, std_f_1_U])
            avgtable.append_row(['P_0_L', avg_p_0_L, std_p_0_L])
            avgtable.append_row(['R_0_L', avg_r_0_L, std_r_0_L])
            avgtable.append_row(['F1_0_L', avg_f_0_L, std_f_0_L])
            avgtable.append_row(['P_1_L', avg_p_1_L, std_p_1_L])
            avgtable.append_row(['R_1_L', avg_r_1_L, std_r_1_L])
            avgtable.append_row(['F1_1_L', avg_f_1_L, std_f_1_L])
            avgtable.append_row(['Accuracy_dev', avg_a_d, std_a_d])
            print(avgtable)
            logfile.write(str(avgtable) + "\n")
            msg = 'Elapsed time: ' + duration + '\n\n'
            print(msg)
            logfile.write(msg)
        logfile.close()
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with readit. If not, see <http://www.gnu.org/licenses/>. ''' import sqlite3 # library of database used for project import datetime # used for getting current time and date from beautifultable import BeautifulTable # display output in table format import webbrowser # used to open url in browser import os # used to find home directory of user date = datetime.date.today() table = BeautifulTable() table.left_border_char = '|' table.right_border_char = '|' table.top_border_char = '=' table.header_seperator_char = '=' table.column_headers = ["ID", "URL", "TAG", "DATE", "TIME"] class DatabaseConnection(object): def __init__(self): """ Calls the function init_db(). """ self.init_db("", "") def init_db(self, cursor, db):
def run(self):
    """run the full lifecycle flow of this deployment

    Order of phases as executed below: install (or reuse existing dirs),
    deploy + smoke-test, optional hot-backup round-trip, optional upgrade
    (with a second hot-backup round-trip), then tests, shutdown and
    uninstall; the selenium UI results table is rendered in a finally
    block so it survives test failures.
    """
    # pylint: disable=too-many-statements disable=too-many-branches
    if self.do_starter_test and not self.remote:
        detect_file_ulimit()

    self.progress(False, "Runner of type {0}".format(str(self.name)), "<3")

    if self.do_install or self.do_system_test:
        self.progress(
            False,
            "INSTALLATION for {0}".format(str(self.name)),
        )
        self.install(self.old_installer)
    else:
        # no fresh install: point the base config at the existing layout
        self.basecfg.set_directories(self.old_installer.cfg)

    if self.do_starter_test:
        self.progress(
            False,
            "PREPARING DEPLOYMENT of {0}".format(str(self.name)),
        )
        self.starter_prepare_env()
        self.starter_run()
        self.finish_setup()
        self.make_data()
        if self.selenium:
            self.set_selenium_instances()
            self.selenium.test_empty_ui()
        ti.prompt_user(
            self.basecfg,
            "{0}{1} Deployment started. Please test the UI!".format((self.versionstr), str(self.name)),
        )

    if self.hot_backup:
        # Full backup round-trip: create, upload, delete, download, restore,
        # then verify that data created after the backup is gone.
        self.progress(False, "TESTING HOTBACKUP")
        self.backup_name = self.create_backup("thy_name_is_" + self.name)
        self.validate_local_backup(self.backup_name)
        self.tcp_ping_all_nodes()
        self.create_non_backup_data()
        backups = self.list_backup()
        print(backups)
        self.upload_backup(backups[0])
        self.tcp_ping_all_nodes()
        self.delete_backup(backups[0])
        self.tcp_ping_all_nodes()
        backups = self.list_backup()
        if len(backups) != 0:
            raise Exception("expected backup to be gone, " "but its still there: " + str(backups))
        self.download_backup(self.backup_name)
        self.validate_local_backup(self.backup_name)
        self.tcp_ping_all_nodes()
        backups = self.list_backup()
        if backups[0] != self.backup_name:
            raise Exception("downloaded backup has different name? " + str(backups))
        self.before_backup()
        self.restore_backup(backups[0])
        self.tcp_ping_all_nodes()
        self.after_backup()
        self.check_data_impl()
        if not self.check_non_backup_data():
            raise Exception("data created after backup" " is still there??")

    if self.new_installer:
        if self.hot_backup:
            self.create_non_backup_data()
        self.versionstr = "NEW[" + self.new_cfg.version + "] "
        self.progress(
            False,
            "UPGRADE OF DEPLOYMENT {0}".format(str(self.name)),
        )
        self.new_installer.calculate_package_names()
        self.new_installer.upgrade_server_package(self.old_installer)
        lh.subsection("outputting version")
        self.new_installer.output_arangod_version()
        self.new_installer.get_starter_version()
        self.new_installer.get_sync_version()
        self.new_installer.stop_service()
        self.cfg.set_directories(self.new_installer.cfg)
        self.new_cfg.set_directories(self.new_installer.cfg)

        self.upgrade_arangod_version()  # make sure to pass new version
        self.old_installer.un_install_server_package_for_upgrade()
        if self.is_minor_upgrade() and self.new_installer.supports_backup():
            self.new_installer.check_backup_is_created()
        if self.hot_backup:
            # Repeat the backup round-trip against the upgraded deployment.
            self.check_data_impl()
            self.progress(False, "TESTING HOTBACKUP AFTER UPGRADE")
            backups = self.list_backup()
            print(backups)
            self.upload_backup(backups[0])
            self.tcp_ping_all_nodes()
            self.delete_backup(backups[0])
            self.tcp_ping_all_nodes()
            backups = self.list_backup()
            if len(backups) != 0:
                raise Exception("expected backup to be gone, " "but its still there: " + str(backups))
            self.download_backup(self.backup_name)
            self.validate_local_backup(self.backup_name)
            self.tcp_ping_all_nodes()
            backups = self.list_backup()
            if backups[0] != self.backup_name:
                raise Exception("downloaded backup has different name? " + str(backups))
            time.sleep(20)  # TODO fix
            self.before_backup()
            self.restore_backup(backups[0])
            self.tcp_ping_all_nodes()
            self.after_backup()
            if not self.check_non_backup_data():
                raise Exception("data created after " "backup is still there??")
            self.check_data_impl()
    else:
        logging.info("skipping upgrade step no new version given")

    try:
        if self.do_starter_test:
            self.progress(
                False,
                "{0} TESTS FOR {1}".format(self.testrun_name, str(self.name)),
            )
            self.test_setup()
            self.jam_attempt()
            self.starter_shutdown()
            for starter in self.starter_instances:
                starter.detect_fatal_errors()
        if self.do_uninstall:
            # uninstall whichever installer is current after a possible upgrade
            self.uninstall(self.old_installer if not self.new_installer else self.new_installer)
    finally:
        # Always publish the selenium results, even if tests raised.
        if self.selenium:
            ui_test_results_table = BeautifulTable(maxwidth=160)
            for result in self.selenium.test_results:
                ui_test_results_table.rows.append(
                    [result.name, "PASSED" if result.success else "FAILED", result.message, result.traceback]
                )
                if not result.success:
                    self.ui_tests_failed = True
            ui_test_results_table.columns.header = ["Name", "Result", "Message", "Traceback"]
            self.progress(False, "UI test results table:", supress_allure=True)
            self.progress(False, "\n" + str(ui_test_results_table), supress_allure=True)
            self.ui_test_results_table = ui_test_results_table
            self.quit_selenium()

    self.progress(False, "Runner of type {0} - Finished!".format(str(self.name)))
for _ in range(num_of_iterations): m = 3 begin_time = time.perf_counter() main() kohren() t_list = studens() fisher = fish() end_time = time.perf_counter() counted_time += end_time - begin_time average_time = counted_time / num_of_iterations plan_table = BeautifulTable() headers_x = ['X{}'.format(i) for i in range(1, m + 1)] headers_y = ['Y{}'.format(i) for i in range(1, m + 1)] plan_table.columns.header = [*headers_x, *headers_y] for i in range(len(xn)): plan_table.rows.append([*plan_matrix_for_output[i], *y_matrix[i]]) print('Матриця планування:') print(plan_table) print('Normalized equations:') result_y = [] for i in range(len(plan_matrix_x[0])): result_y.append(b0 + b1 * plan_matrix_for_output[i][0] + b2 * plan_matrix_for_output[i][1] + b3 * plan_matrix_for_output[i][2]) if round(result_y[i], 3) == round(average_y[i], 3):
def show_results(self):
    # This is the final step of estimation.
    """Print the final landing-page media-plan recommendation.

    Pulls the estimated rates/volumes from ``self.check_regions()`` and
    prints a BeautifulTable with one column per requested month plus a
    total-budget column, then warns about every traffic source whose
    planned spend falls below its required minimum (``BUDGET_BOTTOM``).

    Side effects only (printing); returns ``None``.
    """
    new_landing_rates, new_landing_volumes, model, which_months = self.check_regions()
    models = {
        "1": "CPM",
        "2": "CPI",
        "3": "CPA"
    }
    months = {
        1: "Январь", 2: "Февраль", 3: "Март", 4: "Апрель",
        5: "Май", 6: "Июнь", 7: "Июль", 8: "Август",
        9: "Сентябрь", 10: "Октябрь", 11: "Ноябрь", 12: "Декабрь"
    }
    now = datetime.datetime.now()
    print("\n🏆 Моя рекомендация будет следующей: 🏆")
    print("\n--- Лендинг 📄 ---")
    table_landing = BeautifulTable()
    table_landing.append_column("Источник", list(new_landing_rates.keys()))
    table_landing.append_column(
        f"Ставка {models[model]}",
        [str(rate).replace(".", ",") for rate in new_landing_rates.values()])

    volumes_all_together = []
    for month in which_months:
        number_of_days_in_month = int(calendar.monthrange(now.year, month)[1])
        # Seasonal correction divisor; None means no correction for this month.
        if month == 2 or month == 11:
            factor = INCREASE_FEBRUARY_NOVEMBER
        elif month == 12:
            factor = INCREASE_DECEMBER
        else:
            factor = None
        # Model "1" (CPM) rates are per 1000 impressions, so volumes are
        # scaled down by 1000 before the per-day multiplication.
        # NOTE: the division order mirrors the original per-branch
        # expressions exactly, so int() truncation results are unchanged.
        if model == "1":
            base = [volume / 1000 for volume in new_landing_volumes.values()]
        else:
            base = list(new_landing_volumes.values())
        if factor is not None:
            base = [value / factor for value in base]
        volumes = [int(value * number_of_days_in_month) for value in base]
        volumes_all_together.append(volumes)
        table_landing.append_column(months[month], volumes)

    # Total yearly volume per source (column-wise sum), paired with its
    # rate in insertion order to produce the budget per source.
    budgets = {}
    totals = numpy.sum(volumes_all_together, axis=0)
    for (key, rate), volume in zip(new_landing_rates.items(), totals):
        budgets[key] = volume * rate
    table_landing.append_column("Всего без НДС",
                                [int(spend) for spend in budgets.values()])
    print(table_landing)
    print("\n")
    for key, spend in budgets.items():
        if spend < BUDGET_BOTTOM[key]:
            difference = BUDGET_BOTTOM[key] - spend
            print(f"🚨 Обрати внимание, что мы не добираем {int(difference)} до минимального бюджета по {key} для iOS!")