def main():
    """Run the full pyOCD test suite against every connected board.

    Writes all console output to automated_test_result.txt (via the Logger
    tee) and a results summary to automated_test_summary.txt, then exits
    with 0 if every test passed and -1 otherwise.
    """
    log_file = "automated_test_result.txt"
    summary_file = "automated_test_summary.txt"

    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    args = parser.parse_args()

    # Setup logging: start from a clean log file and tee stdout/stderr into it.
    if os.path.exists(log_file):
        os.remove(log_file)
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level)
    logger = Logger(log_file)
    sys.stdout = logger
    sys.stderr = logger

    test_list = []
    board_list = []
    result_list = []

    # Put together list of tests
    test_list.append(Test("Basic Test", lambda board: basic_test(board, None)))
    test_list.append(GdbServerJsonTest())
    test_list.append(SpeedTest())
    test_list.append(CortexTest())
    test_list.append(FlashTest())
    test_list.append(GdbTest())

    # Put together list of boards to test
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)

    start = time()
    for board in board_list:
        print("--------------------------")
        print("TESTING BOARD %s" % board.getUniqueID())
        print("--------------------------")
        for test in test_list:
            test_start = time()
            result = test.run(board)
            test_stop = time()
            result.time = test_stop - test_start
            result_list.append(result)
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    # BUG FIX: the summary is written with print(), i.e. text, so the file must
    # be opened in text mode. "wb" raises TypeError on Python 3, and every
    # other main() variant in this file already uses "w".
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)

    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
def print_summary(test_list, result_list, test_time, output_file=None):
    """Emit per-test performance info, the result table, total run time, and
    an overall pass/fail verdict.

    When *output_file* is None, print() sends everything to stdout.
    """
    for current_test in test_list:
        current_test.print_perf_info(result_list, output_file=output_file)

    Test.print_results(result_list, output_file=output_file)
    print("", file=output_file)
    print("Test Time: %s" % test_time, file=output_file)

    # Single verdict line selected up front, printed once.
    verdict = ("All tests passed"
               if Test.all_tests_pass(result_list)
               else "One or more tests has failed!")
    print(verdict, file=output_file)
def main():
    """Run the pyOCD test suite on all (optionally filtered) connected probes,
    optionally in parallel across a process pool.

    Exits with 0 if every test passed and -1 otherwise.
    """
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
                        help='Set number of concurrent board tests (default is 1)')
    parser.add_argument('-b', '--board', action="append", metavar="ID",
                        help="Limit testing to boards with specified unique IDs. Multiple boards can be listed.")
    args = parser.parse_args()

    # Allow CI to override the number of concurrent jobs.
    if 'CI_JOBS' in os.environ:
        args.jobs = int(os.environ['CI_JOBS'])

    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        log_file = LOG_FILE_TEMPLATE.format(get_env_file_name())
        # Create common log file.
        if os.path.exists(log_file):
            os.remove(log_file)
        logToConsole = True
        commonLogFile = open(log_file, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = ConnectHelper.get_all_connected_probes(blocking=False)
    board_id_list = sorted(b.unique_id for b in board_list)

    # Filter boards.
    if args.board:
        board_id_list = [b for b in board_id_list
                         if any(c for c in args.board if c.lower() in b.lower())]

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for n, board_id in enumerate(board_id_list):
            result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
    else:
        # BUG FIX: create the pool *before* entering the try block. Previously
        # mp.Pool() was called inside the try; if it raised, the finally clause
        # referenced the unbound name 'pool' and masked the real error with a
        # NameError.
        pool = mp.Pool(args.jobs)
        try:
            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (board_id, n, level,
                                                           logToConsole, commonLogFile))
                             for n, board_id in enumerate(board_id_list)]

            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    # NOTE(review): test_list is not defined in this function; it is presumably
    # a module-level list -- confirm it exists at module scope.
    print_summary(test_list, result_list, test_time)
    summary_file = SUMMARY_FILE_TEMPLATE.format(get_env_file_name())
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)

    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
# NOTE(review): the line below is an orphan, whitespace-flattened fragment --
# a partial duplicate of the main() at the top of this file, truncated mid
# statement (it ends at "for test in test_list:" with no loop body). Because
# the whole physical line starts with '#', Python treats it as one comment.
# It should almost certainly be deleted as dead text; preserved byte-for-byte
# here pending confirmation.
# Setup logging if os.path.exists(log_file): os.remove(log_file) level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=level) logger = Logger(log_file) sys.stdout = logger sys.stderr = logger test_list = [] board_list = [] result_list = [] # Put together list of tests test = Test("Basic Test", lambda board: basic_test(board, None)) test_list.append(test) test_list.append(SpeedTest()) test_list.append(CortexTest()) test_list.append(FlashTest()) test_list.append(GdbTest()) # Put together list of boards to test board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False) start = time() for board in board_list: print("--------------------------") print("TESTING BOARD %s" % board.getUniqueID()) print("--------------------------") for test in test_list:
class LinRegression:
    """Multiple linear regression over a customer-transaction CSV.

    Loads the data plus prefecture feature table, prunes columns, fits
    sklearn's LinearRegression to predict the 'スコア' (score) column, writes
    the coefficients to ./data/out/linear_regression.csv, and returns the
    train/test R^2 scores.
    """

    def __init__(self):
        # sklearn estimator and project helper objects.
        self.lr = LinearRegression()
        self.file_io = FileIO()
        #self.pca = PCAProcess()
        #self.chart = DrawChart()
        self.test = Test()
        self.individual = IndividualTest()
        self.sc = StandardScaler()   # standardizer (currently unused -- see commented-out block below)
        self.ms = MinMaxScaler()     # normalizer (currently unused)
        self.drop_na = DropNaN()     # NaN-column dropper (currently unused)

    def regression(self, in_path, out_path):
        """Fit the regression on the CSV at *in_path*; return (train_R2, test_R2).

        NOTE(review): *out_path* is never used -- output paths are hard-coded.
        Side effects: prints diagnostics and writes
        ./data/out/linear_regression.csv.
        """
        # Open the input CSVs as pandas DataFrames.
        org_df = self.file_io.open_file_as_pandas(in_path,"utf-8")
        # NOTE(review): feat_shop is loaded but its merge below is commented
        # out, so it is currently unused.
        feat_shop = self.file_io.open_file_as_pandas('./data/out/feat_shop.csv','utf-8')
        feat_pref = self.file_io.open_file_as_pandas('./data/out/feat_pref.csv','utf-8')
        '''
        # 目的変数
        org_df['支払合計'] = org_df['現金外支払合計'] + org_df['現金支払合計']
        # 不要な説明変数削除
        org_df = org_df.drop(['現金外支払合計', '現金支払合計'],axis=1)
        # 目的変数がゼロ以下の行を削除
        org_df = org_df.drop(org_df[org_df['支払合計']==0].index)
        # 欠損値が多すぎる列を削除
        #org_df = org_df.drop(['売上単価'],axis=1)
        # 目的変数が欠損値の行を削除
        org_df = org_df.dropna(subset=['支払合計'])
        '''
        # Merge in the prefecture feature table on customer ID (shop merge disabled).
        #org_df = pd.merge(org_df, feat_shop, on='顧客ID',how='left')
        org_df = pd.merge(org_df, feat_pref, on='顧客ID',how='left')
        # Drop index-artifact columns produced by the merge / CSV round trips.
        org_df = org_df.drop(['Unnamed: 0_x','Unnamed: 0_y'],axis=1)
        org_df = org_df[org_df.columns.drop(list(org_df.filter(regex='Unnamed:')))]
        # Drop rows whose score ('スコア') is <= 0.
        org_df = org_df.drop(org_df[org_df['スコア']<=0].index)
        # Drop columns not usable as features (customer ID).
        #org_df = org_df.drop(['Unnamed: 0', '顧客ID'], axis=1)
        org_df = org_df.drop(['顧客ID'],axis=1)
        #org_df = org_df[org_df.columns.drop(list(org_df.filter(regex='Unnamed:')))]
        #org_df = org_df.columns.drop(org_df.columns.str.contains('Unnamed:'))
        # (disabled) Drop columns that are >= 70% NaN.
        #org_df = self.drop_na.drop_na_col(org_df, len(org_df), 0.7)
        #print('\n rows of org_df is:')
        #print(len(org_df))
        #print(type(len(org_df)))
        # Fill remaining NaNs with zero.
        org_df = org_df.fillna(0)
        # Target Y (the score) and feature matrix X (everything else kept).
        #Y = org_df['売上']
        Y = org_df['スコア']
        #X = org_df.drop(['支払合計'],axis=1)
        X = org_df.drop(['商品コード','売上単価','数量','売上','明細ID','スコア'],axis=1)
        X = X.drop(['キャンセル回数','コンタクト回数','問い合わせ回数'],axis=1)
        X = X.drop(['治療送客回数_あり','治療送客回数_なし','院長挨拶回数_あり','院長挨拶回数_なし','紹介カード受渡回数_あり','紹介カード受渡回数_なし','携帯TEL_有','携帯メール_有','性別_女','性別_男','自宅TEL_有','PCメール_有'],axis=1)
        #X = X.drop(['職業_学生','職業_会社員','職業_主婦','職業_自営業','職業_その他','職業_パート・アルバイト'],axis=1)
        X = X.drop(['登録区分_HP','登録区分_店舗','登録区分_CC'],axis=1)
        #X = X.drop(['生年月日','滞在時間','閲覧ページ総数','閲覧ページ数/セッション'],axis=1)
        # Drop feature-column groups by regex. NOTE(review): the regexes are
        # matched against org_df's columns but dropped from X.columns;
        # Index.drop raises KeyError if a matched label is already absent from
        # X -- this assumes the two column sets still agree here. Confirm.
        X = X[X.columns.drop(list(org_df.filter(regex='_nan')))]
        #X = X[X.columns.drop(list(org_df.filter(regex='_なし')))]
        X = X[X.columns.drop(list(org_df.filter(regex='_空欄')))]
        X = X[X.columns.drop(list(org_df.filter(regex='_無')))]
        X = X[X.columns.drop(list(org_df.filter(regex='_削除')))]
        X = X[X.columns.drop(list(org_df.filter(regex='施術時間')))]
        '''
        X = X[X.columns.drop(list(org_df.filter(regex='キャンセル回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='コンタクト回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='問い合わせ回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='滞在時間')))]
        X = X[X.columns.drop(list(org_df.filter(regex='閲覧ページ総数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='閲覧ページ数/セッション')))]
        '''
        # (disabled) Standardization.
        #std_Y = pd.DataFrame(self.sc.fit_transform(Y))
        #std_Y.columns = Y.columns
        #std_X = pd.DataFrame(self.sc.fit_transform(X))
        #std_X.columns = X.columns
        # (disabled) Normalization.
        #norm_Y = pd.DataFrame(self.ms.fit_transform(Y))
        #norm_Y.columns = Y.columns
        #norm_X = pd.DataFrame(self.ms.fit_transform(X))
        #norm_X.columns = X.columns
        #self.file_io.export_csv_from_pandas(X, './data/out/X.csv')
        # Split into training and test sets (30% held out for test).
        X_train, X_test, Y_train, Y_test = self.test.make_train_test_data(X, Y, 0.3)
        print(X_train.head())
        print("--- X_train's shape ---\n {}\n".format(X_train.shape))
        print(X_test.head())
        print("--- X_test's shape ---\n {}\n".format(X_test.shape))
        print(Y_train.head())
        print("--- Y_train's shape ---\n {}\n".format(Y_train.shape))
        print(Y_test.head())
        print("--- Y_test's shape ---\n {}\n".format(Y_test.shape))
        # Fit the multiple linear regression.
        self.lr.fit(X_train, Y_train)
        # Partial regression coefficients, sorted for inspection.
        print(pd.DataFrame({"Name":X.columns, "Coefficients":self.lr.coef_}).sort_values(by='Coefficients') )
        # Intercept.
        print(self.lr.intercept_)
        # Build the coefficients DataFrame for export.
        org_pd = pd.DataFrame({"Name":X.columns, "Coefficients":self.lr.coef_})
        # Write coefficients to CSV.
        self.file_io.export_csv_from_pandas(org_pd, "./data/out/linear_regression.csv")
        # Report accuracy (R^2) on training data...
        print(" --- train score ---\n {}\n".format(self.lr.score(X_train,Y_train)))
        # ...and on test data.
        print(" --- test score ---\n {}\n".format(self.lr.score(X_test,Y_test)))
        return self.lr.score(X_train,Y_train), self.lr.score(X_test,Y_test)
def main():
    """Run the pyOCD test suite on all connected boards, optionally in
    parallel across a process pool.

    Exits with 0 if every test passed and -1 otherwise.
    """
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
                        help='Set number of concurrent board tests (default is 1)')
    args = parser.parse_args()

    # Force jobs to 1 when running under CI until concurrency issues with enumerating boards are
    # solved. Specifically, the connect test has intermittently failed to open boards on Linux and
    # Win7. This is only done under CI, and in this script, to make testing concurrent runs easy.
    if 'CI_TEST' in os.environ:
        args.jobs = 1

    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        # Create common log file.
        if os.path.exists(LOG_FILE):
            os.remove(LOG_FILE)
        logToConsole = True
        commonLogFile = open(LOG_FILE, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)
    board_id_list = sorted(b.getUniqueID() for b in board_list)

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for n, board_id in enumerate(board_id_list):
            result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
    else:
        # BUG FIX: create the pool *before* entering the try block. Previously
        # mp.Pool() was called inside the try; if it raised, the finally clause
        # referenced the unbound name 'pool' and masked the real error with a
        # NameError.
        pool = mp.Pool(args.jobs)
        try:
            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (board_id, n, level,
                                                           logToConsole, commonLogFile))
                             for n, board_id in enumerate(board_id_list)]

            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    # NOTE(review): test_list is not defined in this function; it is presumably
    # a module-level list -- confirm it exists at module scope.
    print_summary(test_list, result_list, test_time)
    with open(SUMMARY_FILE, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)

    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
def main():
    """Run the pyOCD test suite on all (optionally filtered) connected probes,
    optionally in parallel across a process pool.

    Exits with 0 if every test passed and -1 otherwise.
    """
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
                        help='Set number of concurrent board tests (default is 1)')
    parser.add_argument('-b', '--board', action="append", metavar="ID",
                        help="Limit testing to boards with specified unique IDs. Multiple boards can be listed.")
    args = parser.parse_args()

    # Force jobs to 1 when running under CI until concurrency issues with enumerating boards are
    # solved. Specifically, the connect test has intermittently failed to open boards on Linux and
    # Win7. This is only done under CI, and in this script, to make testing concurrent runs easy.
    if 'CI_TEST' in os.environ:
        args.jobs = 1

    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        # Create common log file.
        if os.path.exists(LOG_FILE):
            os.remove(LOG_FILE)
        logToConsole = True
        commonLogFile = open(LOG_FILE, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = ConnectHelper.get_all_connected_probes(blocking=False)
    board_id_list = sorted(b.unique_id for b in board_list)

    # Filter boards.
    if args.board:
        board_id_list = [b for b in board_id_list
                         if any(c for c in args.board if c.lower() in b.lower())]

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for n, board_id in enumerate(board_id_list):
            result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
    else:
        # BUG FIX: create the pool *before* entering the try block. Previously
        # mp.Pool() was called inside the try; if it raised, the finally clause
        # referenced the unbound name 'pool' and masked the real error with a
        # NameError.
        pool = mp.Pool(args.jobs)
        try:
            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (board_id, n, level,
                                                           logToConsole, commonLogFile))
                             for n, board_id in enumerate(board_id_list)]

            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    # NOTE(review): test_list is not defined in this function; it is presumably
    # a module-level list -- confirm it exists at module scope.
    print_summary(test_list, result_list, test_time)
    with open(SUMMARY_FILE, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)

    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
def main():
    """Run every registered pyOCD test against each connected board in turn.

    All console output is teed into automated_test_result.txt, a summary is
    written to automated_test_summary.txt plus XML results, and the process
    exits 0 on all-pass, -1 otherwise.
    """
    log_file = "automated_test_result.txt"
    summary_file = "automated_test_summary.txt"

    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument(
        '-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
        help='Set number of concurrent board tests (default is 1)')
    args = parser.parse_args()

    # Start from a fresh log file and mirror stdout/stderr into it.
    if os.path.exists(log_file):
        os.remove(log_file)
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
    logger = Logger(log_file)
    sys.stdout = logger
    sys.stderr = logger

    # The suite, executed in this order for every board.
    test_list = [
        BasicTest(),
        GdbServerJsonTest(),
        ConnectTest(),
        SpeedTest(),
        CortexTest(),
        FlashTest(),
        GdbTest(),
    ]
    result_list = []

    # Enumerate the boards to test.
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)

    start = time()
    for board in board_list:
        print("--------------------------")
        print("TESTING BOARD %s" % board.getUniqueID())
        print("--------------------------")
        for test in test_list:
            test_start = time()
            result = test.run(board)
            result.time = time() - test_start
            result_list.append(result)
    test_time = time() - start

    # Summary to console, then to the summary file, then XML for CI.
    print_summary(test_list, result_list, test_time)
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)

    exit(0 if Test.all_tests_pass(result_list) else -1)
class LinRegression2:
    """Multiple linear regression over a customer-transaction CSV (variant 2).

    Unlike LinRegression, this version does not merge feature tables and
    drops attribute-style feature groups wholesale. Fits sklearn's
    LinearRegression to predict the 'スコア' (score) column, writes the
    coefficients to ./data/out/linear_regression.csv, and returns the
    train/test R^2 scores.
    """

    def __init__(self):
        # sklearn estimator and project helper objects.
        self.lr = LinearRegression()
        self.file_io = FileIO()
        #self.pca = PCAProcess()
        #self.chart = DrawChart()
        self.test = Test()
        self.individual = IndividualTest()
        self.sc = StandardScaler()   # standardizer (currently unused -- see commented-out block below)
        self.ms = MinMaxScaler()     # normalizer (currently unused)
        self.drop_na = DropNaN()     # NaN-column dropper (currently unused)

    def regression(self, in_path, out_path):
        """Fit the regression on the CSV at *in_path*; return (train_R2, test_R2).

        NOTE(review): *out_path* is never used -- output paths are hard-coded.
        Side effects: prints diagnostics and writes
        ./data/out/linear_regression.csv.
        """
        # Open the input CSV as a pandas DataFrame.
        org_df = self.file_io.open_file_as_pandas(in_path, "utf-8")
        '''
        # 目的変数
        org_df['支払合計'] = org_df['現金外支払合計'] + org_df['現金支払合計']
        # 不要な説明変数削除
        org_df = org_df.drop(['現金外支払合計', '現金支払合計'],axis=1)
        # 目的変数がゼロ以下の行を削除
        org_df = org_df.drop(org_df[org_df['支払合計']==0].index)
        # 欠損値が多すぎる列を削除
        #org_df = org_df.drop(['売上単価'],axis=1)
        # 目的変数が欠損値の行を削除
        org_df = org_df.dropna(subset=['支払合計'])
        '''
        # Drop rows whose score ('スコア') is <= 0.
        org_df = org_df.drop(org_df[org_df['スコア'] <= 0].index)
        # Drop columns not usable as features (customer ID, index artifacts).
        #org_df = org_df.drop(['Unnamed: 0', '顧客ID'], axis=1)
        org_df = org_df.drop(['顧客ID'], axis=1)
        org_df = org_df[org_df.columns.drop(
            list(org_df.filter(regex='Unnamed:')))]
        # (disabled) Drop columns that are >= 70% NaN.
        #org_df = self.drop_na.drop_na_col(org_df, len(org_df), 0.7)
        #print('\n rows of org_df is:')
        #print(len(org_df))
        #print(type(len(org_df)))
        # Fill remaining NaNs with zero.
        org_df = org_df.fillna(0)
        # Target Y (the score) and feature matrix X (everything else kept).
        #Y = org_df['支払合計']
        Y = org_df['スコア']
        #X = org_df.drop(['支払合計'],axis=1)
        X = org_df.drop(['商品コード', '売上単価', '数量', '売上', '明細ID', 'スコア'], axis=1)
        # Drop attribute-information columns and column groups matched by
        # regex. NOTE(review): the regexes are matched against org_df's
        # columns but dropped from X.columns; Index.drop raises KeyError if a
        # matched label is already absent from X -- this assumes the two
        # column sets still agree here. Confirm.
        X = X.drop(['滞在時間'], axis=1)
        X = X.drop(['キャンセル回数', 'コンタクト回数', '問い合わせ回数'], axis=1)
        X = X[X.columns.drop(list(org_df.filter(regex='施術時間')))]
        X = X[X.columns.drop(list(org_df.filter(regex='指名回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='コース受諾回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='紹介カード受渡回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='治療送客回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='院長挨拶回数')))]
        X = X[X.columns.drop(list(org_df.filter(regex='性別')))]
        X = X[X.columns.drop(list(org_df.filter(regex='携帯TEL')))]
        X = X[X.columns.drop(list(org_df.filter(regex='自宅TEL')))]
        X = X[X.columns.drop(list(org_df.filter(regex='携帯メール')))]
        X = X[X.columns.drop(list(org_df.filter(regex='PCメール')))]
        X = X[X.columns.drop(list(org_df.filter(regex='職業')))]
        X = X[X.columns.drop(list(org_df.filter(regex='登録区分')))]
        # (disabled) Standardization.
        #std_Y = pd.DataFrame(self.sc.fit_transform(Y))
        #std_Y.columns = Y.columns
        #std_X = pd.DataFrame(self.sc.fit_transform(X))
        #std_X.columns = X.columns
        # (disabled) Normalization.
        #norm_Y = pd.DataFrame(self.ms.fit_transform(Y))
        #norm_Y.columns = Y.columns
        #norm_X = pd.DataFrame(self.ms.fit_transform(X))
        #norm_X.columns = X.columns
        #self.file_io.export_csv_from_pandas(X, './data/out/X.csv')
        # Split into training and test sets (30% held out for test).
        X_train, X_test, Y_train, Y_test = self.test.make_train_test_data(
            X, Y, 0.3)
        print(X_train.head())
        print("--- X_train's shape ---\n {}\n".format(X_train.shape))
        print(X_test.head())
        print("--- X_test's shape ---\n {}\n".format(X_test.shape))
        print(Y_train.head())
        print("--- Y_train's shape ---\n {}\n".format(Y_train.shape))
        print(Y_test.head())
        print("--- Y_test's shape ---\n {}\n".format(Y_test.shape))
        # Fit the multiple linear regression.
        self.lr.fit(X_train, Y_train)
        # Partial regression coefficients, sorted for inspection.
        print(
            pd.DataFrame({
                "Name": X.columns,
                "Coefficients": self.lr.coef_
            }).sort_values(by='Coefficients'))
        # Intercept.
        print(self.lr.intercept_)
        # Build the coefficients DataFrame for export.
        org_pd = pd.DataFrame({
            "Name": X.columns,
            "Coefficients": self.lr.coef_
        })
        # Write coefficients to CSV.
        self.file_io.export_csv_from_pandas(
            org_pd, "./data/out/linear_regression.csv")
        # Report accuracy (R^2) on training data...
        print(" --- train score ---\n {}\n".format(
            self.lr.score(X_train, Y_train)))
        # ...and on test data.
        print(" --- test score ---\n {}\n".format(self.lr.score(
            X_test, Y_test)))
        return self.lr.score(X_train, Y_train), self.lr.score(X_test, Y_test)