def write(self, msg):
    """Send *msg* to the server, newline-terminated.

    Waits up to 5 seconds for the socket to become writable; if it does
    not, reports the error, closes the socket and shuts the client down
    via quit_client().
    """
    # Wait (max 5s) until the server socket is ready for writing.
    read, write, opt = select.select([], [self.sock], [], 5)
    if not len(write):
        my_print('Error: Cannot communicate with the server')
        # BUGFIX: was self.socket.close() — this method selects on
        # self.sock, so self.socket raised AttributeError instead of
        # closing the connection.
        self.sock.close()
        quit_client()
    # Messages are newline-terminated on the wire.
    os.write(self.sockfd, str(msg) + '\n')
def log(self, user_id=None, user_name=None, op_type=None, assets_code=None,
        assets_name=None, _log=None, is_commit=False, is_print=True):
    """Persist an operation-log record and optionally echo it.

    Delegates persistence to the DAO layer; when is_print is true the
    record is also written to the console via my_print.
    """
    self._dao.insert_log(
        self._db,
        user_id=user_id,
        user_name=user_name,
        op_type=op_type,
        assets_code=assets_code,
        assets_name=assets_name,
        _log=_log,
        is_commit=is_commit)
    if not is_print:
        return
    my_print("user: {0} type: {1} assets: {2}({3}) _log: {4}".format(
        user_id, op_type, assets_name, assets_code, _log))
def train(epoch):
    """Run one teacher-forced training epoch.

    Uses module-level globals: ``net``, ``trainloader``, ``optimizer``,
    ``criterion``, ``config``.  Total loss = MSE + config.clf_weight * CLF.
    Also anneals the net's scheduled-sampling probability at epoch end.
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    cul_mse_loss = 0
    cul_clf_loss = 0
    n_batch = 0
    for batch_idx, (inputs, labels, line_masks, clf_line) in enumerate(trainloader):
        # Move the batch to GPU with the dtypes the model expects.
        inputs = inputs.float().cuda()
        labels = labels.float().cuda()
        line_masks = line_masks.float().cuda()
        clf_line = clf_line.long().cuda()
        inputs = Variable(inputs)
        labels = Variable(labels, requires_grad=False)
        line_masks = Variable(line_masks, requires_grad=False)
        clf_line = Variable(clf_line, requires_grad=False)
        optimizer.zero_grad()
        # Teacher forcing: ground-truth labels/clf_line are fed to the net.
        pred_pos, pred_prob = net(inputs, config.max_line, labels, clf_line)
        mse_loss, clf_loss = criterion(pred_pos, pred_prob, labels,
                                       line_masks, clf_line)
        loss = mse_loss + config.clf_weight * clf_loss
        loss.backward()
        optimizer.step()
        # FIX: tensor.data[0] was removed in PyTorch 0.4; .item() is the
        # supported scalar accessor and is what the sibling train() in
        # this file already uses.
        train_loss += loss.item()
        cul_mse_loss += mse_loss.item()
        cul_clf_loss += clf_loss.item()
        n_batch += 1
        progress_bar(batch_idx, len(trainloader),
                     'Loss: %.5f %.5f %.5f %.5f %.5f %.5f'
                     % (mse_loss.item(), clf_loss.item(), loss.item(),
                        cul_mse_loss / (batch_idx + 1),
                        cul_clf_loss / (batch_idx + 1),
                        train_loss / (batch_idx + 1)))
    my_print('Epoch: %d Training: Loss: %.5f %.5f %.5f'
             % (epoch, cul_mse_loss / n_batch, cul_clf_loss / n_batch,
                train_loss / n_batch))
    # Anneal scheduled-sampling probability each epoch.
    net.ss_prob += 0.1
def write(self, msg):
    """Send *msg* to the server, newline-terminated.

    Waits up to 5 seconds for the socket to become writable; if it does
    not, reports the error, closes the socket and shuts the client down
    via quit_client().
    """
    # Wait (max 5s) until the server socket is ready for writing.
    read, write, opt = select.select([], [self.sock], [], 5)
    if not len(write):
        my_print('Error: Cannot communicate with the server')
        # BUGFIX: was self.socket.close() — this method selects on
        # self.sock, so self.socket raised AttributeError instead of
        # closing the connection.
        self.sock.close()
        quit_client()
    # Messages are newline-terminated on the wire.
    os.write(self.sockfd, str(msg) + '\n')
def test_00400_do_get_image(self):
    """根据资产代码能找到原始图片"""
    my_print('准备测试:' + self.test_00400_do_get_image.__doc__)
    self.req.parameters["action"] = 'get_image'
    ok = servlet_assets.do_get(self.req)
    body = self.req.res_body
    self.assertEqual(ok, True, '执行成功,找到图片')
    # A JPEG starts with the SOI marker 0xFFD8 and ends with EOI 0xFFD9.
    self.assertEqual(body[:2], b'\xff\xd8')
    self.assertEqual(body[-2:], b'\xff\xd9')
def train(epoch):
    """Run one training epoch of the point/spline model.

    Uses module-level globals: ``net``, ``trainloader``, ``optimizer``,
    ``criterion``, ``config``.  Total loss is
    MSE + config.clf_weight * (2 * point_clf + spline_clf).
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    cul_mse_loss = 0
    cul_point_clf_loss = 0
    cul_spline_clf_loss = 0
    n_batch = 0
    for batch_idx, (inputs, labels, line_masks, clf_line,
                    point_masks, clf_point) in enumerate(trainloader):
        # Move the batch to GPU with the dtypes the model expects.
        inputs = inputs.float().cuda()
        labels = labels.float().cuda()
        line_masks = line_masks.float().cuda()
        clf_line = clf_line.long().cuda()
        point_masks = point_masks.float().cuda()
        clf_point = clf_point.long().cuda()
        inputs = Variable(inputs)
        labels = Variable(labels, requires_grad=False)
        line_masks = Variable(line_masks, requires_grad=False)
        clf_line = Variable(clf_line, requires_grad=False)
        point_masks = Variable(point_masks, requires_grad=False)
        clf_point = Variable(clf_point, requires_grad=False)
        optimizer.zero_grad()
        point_pos_pred, point_prob_pred, line_prob_pred = net(
            inputs, config.max_line)
        mse_loss, spline_clf_loss, point_clf_loss = criterion(
            point_pos_pred, point_prob_pred, line_prob_pred, labels,
            line_masks, clf_line, point_masks, clf_point)
        # Point classification is weighted twice as heavily as spline
        # classification.
        loss = mse_loss + config.clf_weight * (2 * point_clf_loss +
                                               spline_clf_loss)
        loss.backward()
        optimizer.step()
        # FIX: unified on .item() (the PyTorch >= 0.4 scalar accessor);
        # the original mixed loss.data.item() and .item(), and carried
        # dead commented-out .data[0] code, removed here.
        train_loss += loss.item()
        cul_mse_loss += mse_loss.item()
        cul_point_clf_loss += point_clf_loss.item()
        cul_spline_clf_loss += spline_clf_loss.item()
        n_batch += 1
        progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f' %
            (mse_loss.item(), point_clf_loss.item(), spline_clf_loss.item(),
             loss.item(), cul_mse_loss / (batch_idx + 1),
             cul_point_clf_loss / (batch_idx + 1),
             cul_spline_clf_loss / (batch_idx + 1),
             train_loss / (batch_idx + 1)))
    my_print('Epoch: %d Training: Loss: %.5f %.5f %.5f %.5f'
             % (epoch, cul_mse_loss / n_batch, cul_point_clf_loss / n_batch,
                cul_spline_clf_loss / n_batch, train_loss / n_batch))
def test(epoch):
    """Evaluate on ``testloader``, then checkpoint the model and dump
    the collected (prediction, label) pairs to disk.

    Uses module-level globals: ``net``, ``testloader``, ``criterion``,
    ``config``.  Writes chkt_<epoch> and prediction/result_epoch_<epoch>
    under config.checkpoint_path.
    """
    net.eval()
    test_predict = []
    test_label = []
    test_loss = 0.0
    cul_mse_loss = 0
    cul_clf_loss = 0
    loss = 0
    n_batch = 0
    for batch_idx, (inputs, labels, line_masks, clf_line) in enumerate(testloader):
        # Keep the CPU copy of the labels for the dumped score file.
        test_label.append(labels)
        inputs = inputs.float().cuda()
        labels = labels.float().cuda()
        line_masks = line_masks.float().cuda()
        clf_line = clf_line.long().cuda()
        inputs = Variable(inputs)
        labels = Variable(labels, requires_grad=False)
        line_masks = Variable(line_masks, requires_grad=False)
        clf_line = Variable(clf_line, requires_grad=False)
        pred_pos, pred_prob = net(inputs, config.max_line, labels, clf_line)
        test_predict.append((pred_pos.cpu().data, pred_prob.cpu().data))
        mse_loss, clf_loss = criterion(pred_pos, pred_prob, labels,
                                       line_masks, clf_line)
        loss = mse_loss + config.clf_weight * clf_loss
        # FIX: tensor.data[0] was removed in PyTorch 0.4; .item() is the
        # supported scalar accessor (already used elsewhere in this file).
        test_loss += loss.item()
        cul_mse_loss += mse_loss.item()
        cul_clf_loss += clf_loss.item()
        n_batch += 1
        progress_bar(batch_idx, len(testloader),
                     'Loss: %.5f %.5f %.5f %.5f %.5f %.5f'
                     % (mse_loss.item(), clf_loss.item(), loss.item(),
                        cul_mse_loss / (batch_idx + 1),
                        cul_clf_loss / (batch_idx + 1),
                        test_loss / (batch_idx + 1)))
    # NOTE: as in the original, the third value is the *last* batch's loss.
    my_print('Epoch: %d Testing:Loss: %.5f %.5f %.5f %.5f'
             % (epoch, cul_mse_loss / n_batch, cul_clf_loss / n_batch,
                loss.item(), test_loss / n_batch))
    print('Saving..')
    state = {
        'net': net,
        'epoch': epoch,
    }
    if not os.path.isdir(config.checkpoint_path):
        os.makedirs(config.checkpoint_path)
    torch.save(state, os.path.join(config.checkpoint_path, 'chkt_%d' % (epoch)))
    score_state = {
        'test_predict': test_predict,
        'test_label': test_label,
    }
    test_predict = []
    test_label = []
    if not os.path.isdir(os.path.join(config.checkpoint_path, 'prediction')):
        os.makedirs(os.path.join(config.checkpoint_path, 'prediction'))
    torch.save(score_state,
               os.path.join(config.checkpoint_path,
                            'prediction/result_epoch_%d' % (epoch)))
    print('Saved at %s' % (config.checkpoint_path))
def test_00500_do_biz(self):
    """资产代码code、用户login_name不能成功借出不存在的资产"""
    my_print('准备测试:' + self.test_00500_do_biz.__doc__)
    self.req.parameters['code'] = 'no_assets_code'
    ok = servlet_assets.do_post(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, True, '没有借出成功')
    self.assertEqual(resp.get('status'), Const.OpStatus.其他.value,
                     '不能借出不存在的物品')
    self.assertEqual(resp.get('message'),
                     '资产代码: no_assets_code 还未入库, 不能借还。')
def test_00501_do_biz(self):
    """资产代码code、用户login_name成功借出资产"""
    my_print('准备测试:' + self.test_00501_do_biz.__doc__)
    ok = servlet_assets.do_post(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, True, '借出成功')
    self.assertEqual(resp.get('status'), Const.OpStatus.成功.value, '借出成功')
    expected = 'admin(18995533533)成功借出admin的图书({})'.format(
        self.req.parameters['code'])
    self.assertEqual(resp.get('message'), expected)
def test_00601_do_biz(self):
    """资产代码code、管理者用户login_name成功还资产"""
    my_print('准备测试:' + self.test_00601_do_biz.__doc__)
    ok = servlet_assets.do_post(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, True, '归还成功')
    self.assertEqual(resp.get('status'), Const.OpStatus.成功.value,
                     '成功归还属于你管理的物品')
    self.assertEqual(resp.get('message'),
                     '管理员:admin 归还了 admin 借的 admin 的 图书')
def test_00200_do_post(self):
    """成功创建资产,附件方式"""
    my_print('准备测试:' + self.test_00200_do_post.__doc__)
    ok = servlet_assets_add.do_post(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, True, '执行成功, 创建资产成功')
    self.assertEqual(Const.OpStatus.成功.value, resp.get('status'), '创建资产成功')
    expected = 'admin(login_name_0001) 添加资产 图书({})-类别一 并设置管理成功'.format(
        AssetsServletTestCase.assets_code)
    self.assertEqual(expected, resp.get('message'), '返回消息')
def test_00300_do_get_assets(self):
    """根据资产代码能找到记录"""
    my_print('准备测试:' + self.test_00300_do_get_assets.__doc__)
    self.req.parameters["action"] = 'get_assets'
    ok = servlet_assets.do_get(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, True, '执行成功,找到记录')
    self.assertEqual(Const.OpStatus.成功.value, resp.get('status'), '能找到记录')
    expected = '找到 code:{} 或者 login_name:login_name_0001 的记录'.format(
        AssetsServletTestCase.assets_code)
    self.assertEqual(expected, resp.get('message'), '消息')
def test_00202_do_post(self):
    """成功创建资产,不带附件方式"""
    # BUGFIX: the log line referenced test_00201_do_post.__doc__ (a
    # copy-paste slip); every sibling test prints its *own* docstring.
    my_print('准备测试:' + self.test_00202_do_post.__doc__)
    self.req.parameters['image'] = None
    self.req.parameters['code'] = 'servlet.assets.code'
    b_ret = servlet_assets_add.do_post(self.req)
    d = dict()
    d.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(b_ret, True, '执行成功, 创建资产成功')
    self.assertEqual(Const.OpStatus.成功.value, d.get('status'), '创建资产成功')
    self.assertEqual(
        'admin(login_name_0001) 添加资产 图书({})-类别一 并设置管理成功'.format(
            self.req.parameters['code']),
        d.get('message'), '返回消息')
def test_00401_do_get_image(self):
    """根据资产代码能找到记录,但是没有图片"""
    my_print('准备测试:' + self.test_00401_do_get_image.__doc__)
    self.req.parameters["action"] = 'get_image'
    self.req.parameters['code'] = AssetsServletTestCase.assets_code_no_attach
    ok = servlet_assets.do_get(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, False, '执行成功,记录没有图片')
    self.assertEqual(Const.OpStatus.成功.value, resp.get('status'), '不能找到图片')
    expected = '资产代码:{0}, 没有图像记录或者图像记录格式不对'.format(
        AssetsServletTestCase.assets_code_no_attach)
    self.assertEqual(expected, resp.get('message'), '消息')
def test_00600_do_biz(self):
    """资产代码code、非管理者不能成功还资产,不是管理员"""
    my_print('准备测试:' + self.test_00600_do_biz.__doc__)
    # Impersonate a user who does not manage this asset.
    self.req.parameters['userInfo']['login_name'] = 'is_not_mng_assets_user'
    self.req.parameters['userInfo']['id'] = None
    ok = servlet_assets.do_post(self.req)
    resp = {}
    resp.update(json.loads(self.req.res_body.decode('UTF-8')))
    self.assertEqual(ok, True, '没有归还成功')
    self.assertEqual(resp.get('status'), Const.OpStatus.失败.value,
                     '不能归还不属于你管理的物品')
    expected = '资产: 图书({}) 不由你管理,不能完成归还动作'.format(
        self.req.parameters['code'])
    self.assertEqual(resp.get('message'), expected)
def getDriveBoxPlot(SESSION):
    """Box-plot every user's per-match ride distance and save drive.png."""
    df = pd.DataFrame()
    for user in SESSION.query(User).all():
        rows = SESSION.query(Match.date, Match.ride_distance).filter(
            Match.user_name == user.name).order_by(Match.date.desc()).all()
        user_df = pd.DataFrame(rows, columns=['date', user.name]).set_index('date')
        df = pd.concat([df, user_df])
    # Display timestamps in GMT-5 rather than UTC.
    # NOTE(review): 'date' is the index after set_index, so this branch
    # normally does not fire — kept for behavioral parity.
    tz = timezone(timedelta(hours=-5), "GMT-5")
    if "date" in df.columns:
        df["date"] = df["date"].apply(
            lambda ts: ts.replace(tzinfo=timezone.utc).astimezone(tz=tz))
    axes = df.plot.box()
    axes.set_ylabel('Distance Driven (m)')
    out = os.path.realpath('drive.png')
    plt.savefig(out)
    my_print(out)
def getDistances(SESSION, name):
    """Plot one user's travel samples (ride + walk distances).

    Saves a stacked histogram (and draws a box plot) to distances.png and
    prints the absolute path.
    """
    rows = SESSION.query(
        Match.date, Match.user_name, Match.ride_distance,
        Match.walk_distance).filter(Match.user_name == name).order_by(
            Match.date.desc()).all()
    df = pd.DataFrame(rows).set_index('date')
    # Display timestamps in GMT-5 rather than UTC.
    tz = timezone(timedelta(hours=-5), "GMT-5")
    if "date" in df.columns:
        df["date"] = df["date"].apply(
            lambda ts: ts.replace(tzinfo=timezone.utc).astimezone(tz=tz))
    axes = df.plot.hist(alpha=.5,
                        title="{} Travel Samples".format(name),
                        stacked=True)
    axes.set_xlabel('Distance (m)')
    df.plot.box()
    out = os.path.realpath('distances.png')
    plt.savefig(out)
    my_print(out)
def makeTable(SESSION, user, columns=("date", "name", "rank", "kills"),
              limit=10):
    """Render a user's most recent matches as a PNG table (table.png).

    columns selects the Match attributes to show; limit caps the row count.
    """
    selected = [getattr(Match, col) for col in columns]
    query = SESSION.query(*selected).filter(
        Match.user_name.like(user)).order_by(Match.date.desc()).limit(limit)
    rows = [row for row in query]
    df = pd.DataFrame(rows, columns=columns)
    # Display timestamps in GMT-5 rather than UTC.
    tz = timezone(timedelta(hours=-5), "GMT-5")
    if "date" in df.columns:
        df["date"] = df["date"].apply(
            lambda ts: ts.replace(tzinfo=timezone.utc).astimezone(tz=tz))
    table = ff.create_table(df)
    file_path = os.path.realpath('table.png')
    py.image.save_as(table, filename=file_path)
    my_print(file_path)
def getHeadShotStats(SESSION):
    """Histogram headshot kills per user (shared y-axis) into headshots.png."""
    df = pd.DataFrame()
    for user in SESSION.query(User).all():
        rows = SESSION.query(
            Match.date, Match.user_name, Match.headshot_kills).filter(
                Match.user_name == user.name).order_by(Match.date.desc()).all()
        user_df = pd.DataFrame(
            rows, columns=['date', 'name', 'headshots']).set_index('date')
        df = pd.concat([df, user_df])
    # Display timestamps in GMT-5 rather than UTC.
    tz = timezone(timedelta(hours=-5), "GMT-5")
    if "date" in df.columns:
        df["date"] = df["date"].apply(
            lambda ts: ts.replace(tzinfo=timezone.utc).astimezone(tz=tz))
    # One bin per headshot count up to the observed maximum.
    top = df['headshots'].max()
    df.hist(by=df['name'], bins=range(1, top + 1), sharey=True)
    out = os.path.realpath('headshots.png')
    plt.savefig(out)
    my_print(out)
def load_file(self):
    """Worker loop that loads one data file and hands it to the trainer.

    Training mode: loops forever; each pass loads the next training shard,
    pushes [features, labels] into self.queue, then blocks on the
    self.can_load_file event before loading the next shard.
    Validation mode: loads the single validation file into self.data_list
    and returns immediately.
    """
    while True: # infinite loop, but it waits on an event each pass, so it does not spin
        filename = ""
        # Pick the file to load.
        if self.is_train:
            index = self.train_list[self.train_index]
            my_print('[pl %s' % str(index).rjust(4))
            filename = config.train_prefix + '-' + str(index) + '.dat'
        else:
            filename = config.train_prefix + "-val.dat"
            my_print('[pl validate data')
        # Load the raw data file.
        with open(filename, 'rb') as data_file:
            data = bytearray(data_file.read())
        # Apply a random board symmetry (data augmentation).
        if self.is_train and config.apply_symmetry:
            symmetry.apply_random_symmetry(data)
        # Reshape the flat bytes into one row per sample:
        # config.head header bytes followed by 361 board bytes.
        data = np.array(data).reshape(-1, config.head+361)
        # Shuffle the samples.
        if self.is_train and config.shuffle_data:
            np.random.shuffle(data)
        # Convert the move from (x, y) board coordinates to a 0-360 index.
        # NOTE(review): relies on np.array(map(...)) producing a real array,
        # i.e. Python 2's list-returning map — confirm interpreter version.
        label = np.array(map(lambda x, y : x + y * 19, \
            data[:, 0:1].flatten(), data[:, 1:2].flatten()), \
            np.uint16)
        # Unpack the bit-packed feature planes and move the plane axis
        # first: (N, 19, 19, 8) -> (N, 8, 19, 19).
        data = np.unpackbits(data[:, config.head:config.head+361]).reshape(-1, 19, 19, 8)
        data = np.moveaxis(data, -1, 1)
        # Loading finished.
        my_print(']')
        if self.is_train: # training data is loaded shard after shard
            # Hand the shard to the consumer (blocks while the queue is full).
            self.queue.put(obj = [data, label], block = True, timeout = None) # save
            self.train_index = self.train_index + 1
            # After a full pass over all shards, reshuffle the shard order.
            if (self.train_index >= len(self.train_list)):
                self.train_index = 0
                random.shuffle(self.train_list)
        else: # validation data is loaded in a single shot
            self.data_list = [mx.ndarray.array(data, config.data_device), mx.ndarray.array(label, config.data_device)]
        gc.collect() # force garbage collection, otherwise memory usage balloons
        if not self.is_train:
            return # validation data: done after one load
        # Otherwise wait until the trainer signals that the next shard may load.
        if self.is_train:
            self.can_load_file.wait()
            self.can_load_file.clear()
def getVehicleDestroys(SESSION):
    """Per-user bar chart of vehicle destroys, one column per rank.

    Saves (and overwrites, per user) vehicles.png and prints its path.
    """
    for user in SESSION.query(User).all():
        df = pd.DataFrame()
        # Build one column for each rank the user has recorded.
        for rank_row in SESSION.query(
                Match.rank).filter(Match.user_name == user.name).all():
            rank = rank_row[0]
            rows = SESSION.query(Match.date, Match.vehicle_destroys).filter(
                Match.user_name == user.name).filter(
                    Match.rank == rank).order_by(Match.date.desc()).all()
            rank_df = pd.DataFrame(rows, columns=['date', rank]).set_index('date')
            df = pd.concat([df, rank_df])
        # Display timestamps in GMT-5 rather than UTC.
        tz = timezone(timedelta(hours=-5), "GMT-5")
        if "date" in df.columns:
            df["date"] = df["date"].apply(
                lambda ts: ts.replace(tzinfo=timezone.utc).astimezone(tz=tz))
        axes = df.plot.bar()
        axes.set_title("{} Vehicle Destruction".format(user.name))
        axes.set_ylabel('Destroys')
        out = os.path.realpath('vehicles.png')
        plt.savefig(out)
        my_print(out)
def main(inargs=None):
    """CLI entry point: parse arguments, open the DB session, dispatch.

    Command matching is case-insensitive; unknown commands are reported.
    """
    args, unk = parseArgs(inargs)
    command = args.command.lower()
    SESSION = getConnection(args.path)
    if command == 'getdata':
        try:
            getData(SESSION)
            my_print("Updated the Database!")
        except Exception as e:
            # Any failure during ingestion leaves the DB untouched.
            SESSION.rollback()
            my_print(e)
            my_print("Rolled the database back")
    elif command == 'getdistances':
        getDistances(SESSION, args.user)
    elif command == 'extractstuff':
        extractStuff(SESSION)
    elif command == 'getwalkboxplot':
        getWalkBoxPlot(SESSION)
    elif command == 'getdriveboxplot':
        getDriveBoxPlot(SESSION)
    elif command == 'gettravelboxplot':
        getTravelBoxPlot(SESSION)
    elif command == 'getdamagestats':
        getDamageStats(SESSION)
    elif command == 'getheadshotstats':
        getHeadShotStats(SESSION)
    elif command == 'getkillsstats':
        getKillsStats(SESSION)
    elif command == 'maketable':
        makeTable(SESSION, args.user, args.columns, args.limit)
    else:
        my_print("invalid command {}".format(command))
def read(self): read, write, opt = select.select([self.sock], [], [], 5) if not len(read): my_print('Error: Cannot communicate with the server') buf = os.read(self.sockfd, 4096) if len(buf) == 0: my_print('Error: Disconnected from server') self.close() quit_client() print 'server>', for line in buf.splitlines(): my_print(line)
def read(self): read, write, opt = select.select([ self.sock ], [ ], [ ], 5) if not len(read): my_print('Error: Cannot communicate with the server') buf = os.read(self.sockfd, 4096) if len(buf) == 0: my_print('Error: Disconnected from server') self.close() quit_client() print 'server>', for line in buf.splitlines(): my_print(line)
def getData(SESSION):
    """Fetch recent matches for every tracked player and store them.

    For each (name, id) in the module-level IDS mapping: ensure the user
    row exists, then page backwards through the player's match list
    (N_PAGES pages max), inserting matches until a duplicate is seen
    (unless GET_OLD is set, in which case paging continues past dupes).
    Commits once per user and closes the session at the end.
    """
    for n, _id in IDS.items():
        try:
            fillOutUser(SESSION, _id, n)
        except Exception as e:
            # User row already present; not an error for our purposes.
            my_print("{} already in users!".format(n))
        my_print("Adding {} to database".format(n))
        dupe = False
        for p in range(0, N_PAGES):
            time.sleep(random() * 3 + 1)  # sleep from 1-4 seconds between requests
            # A duplicate match means we've reached data we already have;
            # stop paging unless GET_OLD asks us to keep going.
            if dupe:
                if not GET_OLD:
                    break
            try:
                url = LATEST_URL.format(_id)
                if p > 0:
                    # Need the offset token of the last request's final match
                    # to request the next page.
                    try:
                        # get the last match in the list
                        token = j["matches"]["items"][-1]["offset"]
                    except Exception:
                        token = ""
                        my_print("Couldn't get the token")
                        break  # no point continuing
                    url += NEXT_TOKEN.format(token)
                r = requests.get(url, params=PARAMS)
                j = r.json()
                matches = j['matches']['items']
                if len(matches) > 0:
                    for m in matches:
                        # fillOutMatch reports whether this match was already
                        # stored (a duplicate).
                        dupe = fillOutMatch(SESSION, m, n, _id)
                        if dupe:
                            break
                else:
                    # If there were no matches we've gone too far
                    break
            except Exception as e:
                # Best-effort scrape: log and move on to the next page.
                print(e)
                continue
        SESSION.commit()  # commit after each user
    SESSION.commit()
    SESSION.close()
def epoch_callback(epoch, symbol, arg_params, aux_params): global time_last, epoch_accuracy, epoch_loss, epoch_loss_last # 输出真正的epoch数(每完成1次全部文件的训练,为一个真正的epoch) real_epoch = float(epoch) / (config.train_end_index - config.train_begin_index + 1) my_print(' %.2f', (real_epoch)) # 输出性能指标 batch_accuracy = my_metric[0].get_name_value()[0][1] cross_loss = my_metric[1].get_name_value()[0][1]; my_print(' : 1/2/5/10 %.2f', (100.0 * batch_accuracy)) my_print('-%.2f', (100.0 * my_metric[2].get_name_value()[0][1])) my_print('-%.2f', (100.0 * my_metric[3].get_name_value()[0][1])) my_print('-%.2f%%', (100.0 * my_metric[4].get_name_value()[0][1])) my_print(' ce %.3f', (cross_loss)) # 输出当前学习速率 my_print(' : lr %.4f' % (config.learning_rate)) # 更新一些性能指标 epoch_accuracy += batch_accuracy; epoch_loss += cross_loss; # 输出训练耗时 time_now = time.time() if time_last is None: time_last = time_now my_print(' : n/a\n') else: my_print(' : %.2fs\n' % (time_now - time_last)) time_last = time_now # 定期保存模型,并测试性能,并调整学习速率 if epoch % config.save_period == 0: # 保存模型 module.save_checkpoint(config.model_prefix, epoch, save_optimizer_states=True) # 测试在测试数据集的性能指标 val_metric = module.score(val_iter, [mx.metric.Accuracy(), mx.metric.CrossEntropy()]) val_accuracy = val_metric[0][1] val_loss = val_metric[1][1] # 输出性能指标 epoch_accuracy = float(epoch_accuracy) / config.save_period epoch_loss = float(epoch_loss) / config.save_period print("=== [saved] : train %.2f%% ce %.3f : validate %.2f%% ce %.3f ===" % (100.0 * epoch_accuracy, epoch_loss, 100.0 * val_accuracy, val_loss)) # 保存到日志中 os.write(log_file, str(real_epoch) + "," + str(epoch_accuracy) + "," + str(val_accuracy) + "," + str(config.learning_rate) + "\n") os.fsync(log_file) # 自适应地减少学习速率:如果在训练数据的平均损失提高了,则减少学习速率 if epoch_loss > epoch_loss_last and epoch_loss_last != -1: config.learning_rate = config.learning_rate * config.learning_decay epoch_loss_last = epoch_loss epoch_accuracy = 0.0 epoch_loss = 0.0 # 学习速率很低时,终止训练 if config.learning_rate < 
config.exit_learning_rate: exit(0)
def extractStuff(SESSION):
    """Print each user's name and match count, then plot their distances.

    Iterates every User row in the session and delegates the plotting to
    getDistances.
    """
    for u in SESSION.query(User).all():
        my_print(u.name)
        my_print("\t{} Matches Played".format(len(u.matches)))
        # BUGFIX: getDistances takes (SESSION, name); the session argument
        # was missing, so this call raised TypeError at runtime.
        getDistances(SESSION, u.name)
def test_99999_show_db(self):
    """显示目前数据库的数据"""
    my_print('准备测试:' + self.test_99999_show_db.__doc__)
    rw_path = Conf.db_file_path_rw
    img_path = Conf.db_file_path_img
    # Always dump the read/write DB; dump the image DB only when it is a
    # separate file.
    show_db(rw_path)
    if img_path != rw_path:
        show_db(img_path)
from tests.tests import test_funcs
from utils import my_print
from config import set_db_name, set_verbose

# Run against a dedicated test database, quietly.
set_db_name("funmongo_tests")
set_verbose(False)

success = 0
failure = 0
fails = []

# Execute every registered test function, tallying passes and failures.
for func in test_funcs:
    try:
        func()
        my_print("✔ " + func.__name__)
        success += 1
    except Exception as e:
        my_print(repr(e))
        my_print("✖ " + func.__name__)
        failure += 1
        fails.append(func.__name__)

# Summary: either everything passed, or list the failing test names.
if not fails:
    my_print("\n" + "✔ All tests passed")
else:
    my_print("\n" + "✖ " + str(success) + " passed, " + str(failure) + " failed : ")
    for fail in fails:
        my_print(" " + fail)