def handle_request(self):
    """
    Handles the event
    :return: result of handling the event; the result is sent back to the REST admin API
    """
    # setup logging
    classname = self.__class__.__name__
    dt = datetime.utcnow()
    logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day)
    self._logger = Logger(logstream=logstream, buffersize=20, context=self._context)
    with self._logger as logger:
        logger.info("Handler {} : Received request {}", classname, json.dumps(self._event))

        # get access to admin api
        admin = ConfigAdmin(logger=logger, context=self._context)

        # get api action and map it to a function in the admin API
        fn = getattr(admin, self.action, None)
        if fn is None:
            raise ValueError("Action {} does not exist".format(self.action))

        # build parameters for admin API call
        temp = self._event.get("params", {})
        params = {p: temp[p] for p in temp}
        if "name" in self._event:
            params["name"] = self._event["name"]
        logger.info("Calling \"{}\" with parameters {}", fn.__name__, params)

        # call the admin API
        result = fn(**params)
        logger.info("Call result is {}", result)
        return result
def go_to_run(self):
    operacaseexcel = OperaCaseExcel()
    # Get the xlsx and json files to run; returns a list of (xlsx name, json file name) tuples
    run_data = operacaseexcel.get_case_name()
    # Iterate over run_data and pull out the xlsx name and json file name for each entry
    for i in range(0, len(run_data)):
        xlsx_data = run_data[i][0]
        json_data = run_data[i][1]
        # create the logger once per workbook; recreating it per row duplicated log output
        logger = Logger(logger=xlsx_data).getlog()
        logger.info('Test run started')
        logger.info('Using json file: %s', json_data)
        print(xlsx_data)
        p, f, count = 0, 0, 0
        getdata = GetData(xlsx_data)
        # number of case rows in the sheet
        test_count = getdata.get_case_line()
        for j in range(1, test_count):
            is_run = getdata.get_is_run(j)
            if is_run:
                url = getdata.get_request_url(j)
                methon = getdata.get_request_way(j)   # request method
                data = getdata.getdata(j, json_data)   # request data
                cookie = getdata.get_cookie(j)         # whether to send a cookie
                expect = getdata.get_expect_data(j)    # expected value
                # dependent case id
                depent_case = getdata.is_depend(j)
                if depent_case is not None:
                    print(self.depend.get_case_id_data())
                    # response data of the dependent case
                    depend_response_data = self.depend.get_key_for_data(j)
                    print(depend_response_data)
                    # key the current case depends on
                    depend_key = self.getdata.get_dependent_data(j)
                    data[depend_key] = depend_response_data
                print(methon, url, data)
                res = self.run.run_main(methon, url, data, cookie)
                if self.commonutil.is_contain(expect, res):
                    getdata.write_result(j, 'pass')
                    p = p + 1
                else:
                    getdata.write_result(j, res)
                    f = f + 1
        count = p + f
        print(p)
        print(f)
        print(count)
def readProperty(self, key):
    try:
        Logger().info("Reading key {} from config file {}".format(key, self.file_path))
        config = ConfigObj(self.file_path, encoding='UTF8')  # read the config file
        value = config[key]
        return value
    except FileNotFoundError as e:
        Logger().error("Failed to read config file: {}".format(e))
def get_conn(self):
    '''
    Get a MySQL connection.
    :return: mysql connection
    '''
    try:
        conn = pymysql.connect(host=host, user=user, password=password, database=database,
                               port=port, charset=charset)
        Logger().info("Connected to the database successfully!")
        return conn
    except Exception as e:
        Logger().error("Failed to connect to the database: {}".format(e))
def api_post_login(self, api_url, api_method, header, request_data):
    # headers may arrive as a JSON string; convert to a dict
    if isinstance(header, str):
        header = json.loads(header)
    if isinstance(request_data, str):
        # convert the request data string to a dict
        request_data = ast.literal_eval(request_data)
    # url-encode the request data into key=value pairs
    request_data = parse.urlencode(request_data)
    Logger().info("Login API request data: {}.".format(request_data))
    # call the post method and return the server response object
    response = RunMethod().run_main(api_url, api_method, request_data, header)
    Logger().info("Login API response data: {}".format(response))
    return response
def api_post_myuserinfo(self, api_url, api_method, header, request_data):
    # headers may arrive as a JSON string; convert to a dict
    if isinstance(header, str):
        header = json.loads(header)
    if isinstance(request_data, str):
        request_data = ast.literal_eval(request_data)
    # url-encode into key=value pairs, e.g. mobile=xxxxxx&password=xxxxx&...
    request_data = parse.urlencode(request_data)
    Logger().info("User-profile API request data: {}.".format(request_data))
    response = RunMethod().run_main(api_url, api_method, data=request_data, header=header)
    Logger().info("User-profile API response data: {}.".format(response))
    return response
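# Hedged usage sketch (not part of the original project): shows the argument
# shapes the two POST helpers above accept (a JSON header string and a
# dict-literal request-data string). The class name ApiHelper, the URL and the
# credentials are hypothetical placeholders for illustration only.
api = ApiHelper()
header = '{"Content-Type": "application/x-www-form-urlencoded"}'
login_data = "{'mobile': '13800000000', 'password': 'secret'}"
response = api.api_post_login("https://example.com/api/login", "POST", header, login_data)
Logger().info("login response: {}".format(response))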
def __init__(self):
    """ Initializer. """
    self.lines = []
    logger = Logger()
    self.logger = logger.get_logger(MAIN + ".Parser")
def get_logger():
    global log_instance
    if log_instance:
        return log_instance
    log_instance = Logger('log.txt')
    return log_instance
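# Hedged usage sketch (not from the original module): illustrates the
# lazy-singleton behaviour of get_logger() above. It assumes the module
# declares `log_instance = None` at import time and that Logger exposes an
# info() method; both are assumptions made only for this example.
log_instance = None

def do_work():
    log = get_logger()   # first call creates Logger('log.txt')
    log.info("starting work")

def do_more_work():
    log = get_logger()   # later calls reuse the same Logger instance
    log.info("still using the same logger object")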
class WorldServer(RNServer):
    logger = Logger('WorldServer')

    def __init__(self, ip='127.0.0.1', port=2002, backlog=10000,
                 password=b'3.25 ND1', database=None):
        super().__init__((ip, port), backlog, password)
        self.database = database
        self.add_handler(RNEvent.UserPacket, self.handle_packet)
        self.world_handlers = {}
        self.register_handler(PacketHeaders.HANDSHAKE.value, Handshake)

    def handle_packet(self, packet, address):
        header = packet[0:8]
        if header in self.world_handlers:
            self.world_handlers[header].database = self.database
            res = self.world_handlers[header].construct_packet(self, packet)
            if res is not None:
                self.send(res, address)
            else:
                self.logger.warn('unable to construct packet for header-{}'.format(header))
        else:
            self.logger.warn('no registered handlers found for header-{}'.format(header))

    def register_handler(self, header, func):
        self.world_handlers[header] = func
class Login(object):
    logger = Logger().logger
    driver = None
    # username = Location("username input", "input[type=text]")
    # passwd = Location("password input", "input[type=password]")
    # submit = Location("login button", ".btn-login")
    # logo = Location("admin backend logo", "#leftpanel .img")

    def __init__(self):
        # start the driver
        try:
            self.driver = ChromeDriver()
            self.driver.maximize_window()
            self.driver.get(Config.url)
            self.driver.set_page_load_timeout(Config.TIMEOUT)
            self.driver.set_script_timeout(10)
        except Exception as e:
            Log.error("driver initialization failed....\nsystem: {} \nbrowser: {}\ndetails: {}".format(
                Config.system, Config.BROWSER, str(e)))
            if self.driver:
                self.driver.quit()
            raise Exception(e)

    def login(self):
        # login steps are disabled for now
        # self.driver.send(self.username, Config.USER)
        # self.driver.send(self.passwd, Config.PWD)
        # self.driver.click(self.submit)
        # assert self.driver.exists(self.logo), "login failed, admin backend logo not found"
        assert self.driver.title == "微软 Bing 搜索 - 国内版", \
            "Failed to open Bing: browser title is not '微软 Bing 搜索 - 国内版', probably not on the Bing home page"
        return self.driver
def url_set_up(self, author, language, save_to_db=True, data_in=None, author_in=None):
    # avoid mutable default arguments
    data_in = [] if data_in is None else data_in
    author_data = [] if author_in is None else author_in
    if save_to_db:
        author_data = self.d.check_author(author)
    if len(author_data) > 0:
        status = 'All quotes from ' + author + ' in ' + language + ' are up to date!'
        if save_to_db:
            self.d.write_log('status', status)
        Logger().info(status)
        logger.info("Log registered!")
        return {
            "status": False,
            "result": {}
        }
    else:
        logger.info('****** formatting url **** ')
        addr = self.url + author
        logger.info(addr)
        data = self.__fetch_fr(scrap(addr), author, save_to_db=save_to_db, data_in=data_in)
        return {
            "status": True,
            "result": data
        }
def __init__(self, event, context):
    """
    Initializes the instance.
    :param event: Handled event
    :param context: Context if running in Lambda
    """
    self._context = context
    self._event = event
    self._tracking_table = None
    self._concurrency_table = None

    self.started_tasks = 0
    self.started_waiting_tasks = 0
    self.waiting_for_execution_tasks = 0
    self.started_completion_checks = 0
    self.finished_concurrency_tasks = 0
    self.done_work = False
    self.invoked_lambda_functions = []

    self.events_client = None

    # setup logging
    classname = self.__class__.__name__
    dt = datetime.utcnow()
    logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day)
    self._logger = Logger(logstream=logstream, context=self._context, buffersize=20, debug=False)
def run(self):
    # Clear the report directory so that only the latest test report is kept
    for filename in os.listdir(self.report_path):
        f = str(self.report_path + filename)  # use the absolute path
        if os.path.isdir(f):                  # directory or file?
            if not os.listdir(f):             # is the directory empty?
                pass
            else:
                break
        else:
            # remove all old report files
            os.remove(f)
    now = time.strftime("%Y-%m-%d_%H_%M_%S")
    fp = open("./report/" + now + "_result.html", 'wb')
    # discover and run all test cases under the case directory
    tests = unittest.defaultTestLoader.discover("./case", pattern='test*.py', top_level_dir=None)
    # generate the HTML test report
    runner = HTMLTestRunner(stream=fp, title=self.title, description=self.des, tester="Shaomj")
    runner.run(tests)  # run the tests
    fp.close()
    # decide whether to send the report by email, based on the switch
    if is_send_mail == 'on' or is_send_mail == 'ON':
        self.send_email()  # send the email
    else:
        Logger().info("All test cases have finished running; test run complete!")
def main(dataset, model, dropout, bias_init, learning_rate, class_weights, metrics,
         epochs, save_path, log_path):
    args = locals()
    os.makedirs(os.path.dirname(log_path), exist_ok=True)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    logger = Logger(__name__)

    # redirect stdout to the log file
    fd = open(log_path, "a")
    old_fd = sys.stdout
    sys.stdout = fd

    logger.logger.info("Begin")
    print(" ".join([
        "--{} {}".format(
            key,
            str(val) if not isinstance(val, list) else " ".join(map(str, val)))
        for key, val in args.items() if val is not None
    ]))

    dataset = Dataset(dataset)
    dataset.build()
    model = Model(model, dropout, bias_init, class_weights, learning_rate, metrics)
    model.build()
    model.train(dataset.train_gen, dataset.val_gen, epochs, save_path)
    logger.logger.info("End")

    sys.stdout = old_fd
    fd.close()
def query_data(self, sql):
    '''
    Run a SELECT query and return the rows.
    :param sql:
    :return: list[dict]
    '''
    conn = self.get_conn()
    try:
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute(sql)
        Logger().info("Database query succeeded!")
        return cursor.fetchall()
    except Exception as e:
        Logger().error("Database query failed: {}".format(e))
    finally:
        conn.close()
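# Hedged usage sketch (not part of the original code): shows how the MySQL
# helpers get_conn(), query_data() and insert_or_update_data() are presumably
# meant to be combined. It assumes the enclosing class is named ReadMySql and
# that host/user/password/database/port/charset are module-level settings; the
# table and SQL statements are placeholders for illustration only.
if __name__ == '__main__':
    db = ReadMySql()
    # insert_or_update_data() commits; query_data() returns a list of dicts
    db.insert_or_update_data("UPDATE users SET status = 1 WHERE id = 10")
    rows = db.query_data("SELECT id, name FROM users WHERE status = 1")
    for row in rows:
        Logger().info("user {}: {}".format(row["id"], row["name"]))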
class NewTestcase(unittest.TestCase):
    logger = Logger()

    def assertEqual_new(self, first, second, msg, driver):
        try:
            self.assertEqual(first, second, msg)
        except Exception as e:
            self.input_log_and_save_screenshot(str(e), driver)
            raise

    def asserIn_new(self, first, second, msg, driver):
        try:
            self.assertIn(first, second, msg)
        except Exception as e:
            self.input_log_and_save_screenshot(str(e), driver)
            raise

    def assertIs_new(self, first, second, msg, driver):
        try:
            self.assertIs(first, second, msg)
        except Exception as e:
            self.input_log_and_save_screenshot(str(e), driver)
            raise

    def assertisNone_new(self, exp, msg, driver):
        try:
            self.assertIsNone(exp, msg)
        except Exception as e:
            self.input_log_and_save_screenshot(str(e), driver)
            raise

    def input_log_and_save_screenshot(self, exc, driver):
        # log the failure and capture a screenshot before re-raising
        self.logger.error("Assertion failed: " + exc)
        save_screen_shot(driver)
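# Hedged usage sketch (not from the original suite): a test case inheriting
# NewTestcase so that a failed assertion is logged and a screenshot is saved
# before the failure is re-raised. The page object LoginPage and the expected
# title are hypothetical names used only for illustration.
class TestHomeTitle(NewTestcase):

    def setUp(self):
        self.driver = LoginPage().open()   # hypothetical page object returning a webdriver

    def test_title(self):
        self.assertEqual_new(self.driver.title, "Expected Title",
                             "home page title mismatch", self.driver)

    def tearDown(self):
        self.driver.quit()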
def __init__(self, event, context):
    """
    Initializes helper setup class
    :param event:
    :param context:
    """
    CustomResource.__init__(self, event, context)

    # get "clean" set of arguments
    self.arguments = copy(self.resource_properties)
    self.arguments = {
        a: self.resource_properties[a]
        for a in self.resource_properties
        if a not in ["ServiceToken", "Timeout"]
    }

    self.configuration_bucket = os.getenv(configuration.ENV_CONFIG_BUCKET, None)
    self.scheduler_role_arn = self.arguments.get("SchedulerRole")

    # setup logging
    dt = datetime.utcnow()
    classname = self.__class__.__name__
    logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day)
    self._logger = Logger(logstream=logstream, context=context, buffersize=10)
def _run_schedulers_in_process(self):
    # local execution, used for debugging in non lambda environment and IDE's
    result = {}
    # noinspection PyTypeChecker
    account_names = list(self.account_names(self.configuration))
    self._logger.info(
        INF_RUN_SCHEDULER_LOCAL.format(
            ", ".join(self.configuration.scheduled_services),
            ", ".join(account_names),
            ", ".join(self.configuration.regions)))

    for service in self.configuration.scheduled_services:
        service_strategy = SCHEDULER_TYPES[service]()
        scheduler = InstanceScheduler(
            service=service_strategy,
            scheduler_configuration=self.configuration)
        s = "-".join([
            LOG_STREAM_PREFIX,
            "-".join(account_names),
            service,
            "-".join(self.configuration.regions)
        ])
        dt = datetime.utcnow()
        logstream = LOG_STREAM.format(s, dt.year, dt.month, dt.day)

        with Logger(logstream=logstream,
                    buffersize=60 if self.configuration.trace else 30,
                    context=self._context,
                    debug=self.configuration.trace) as logger:
            result[service] = scheduler.run(
                state_table=self.state_table,
                scheduler_config=self.configuration,
                lambda_account=self.lambda_account,
                logger=logger,
                context=self._context)

    self._logger.info(INF_SCHEDULER_RESULT, json.dumps(result, indent=3))
    return result
def lambda_handler(event, context):
    try:
        dt = datetime.utcnow()
        log_stream = LOG_STREAM.format(dt.year, dt.month, dt.day)
        result = {}
        with Logger(logstream=log_stream, buffersize=20, context=context,
                    debug=util.as_bool(os.getenv(configuration.ENV_TRACE, False))) as logger:
            logger.info("InstanceScheduler, version %version%")
            logger.debug("Event is {}", util.safe_json(event, indent=3))

            # dispatch the event to the first handler that accepts it
            for handler_type in [SchedulerRequestHandler,
                                 SchedulerSetupHandler,
                                 ScheduleResourceHandler,
                                 AdminCliRequestHandler,
                                 CloudWatchEventHandler]:
                if handler_type.is_handling_request(event):
                    start = time()
                    handler = handler_type(event, context)
                    logger.info("Handler is {}".format(handler_type.__name__))
                    try:
                        result = handler.handle_request()
                    except Exception as e:
                        logger.error("Error handling request {} by handler {}: ({})\n{}",
                                     json.dumps(event), handler_type.__name__, e,
                                     traceback.format_exc())
                    execution_time = round(float((time() - start)), 3)
                    logger.info("Handling took {} seconds", execution_time)
                    return result
            logger.debug("Request was not handled, no handler was able to handle this type of request {}",
                         json.dumps(event))
    finally:
        configuration.unload_scheduler_configuration()
def parse(self):
    """Parse our options, create checkpoints directory suffix, and set up gpu device."""
    opt = self.gather_options()
    opt.isTrain = self.isTrain  # train or test

    # process opt.suffix
    if opt.suffix:
        suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
        opt.name = opt.name + suffix

    # self.print_options(opt)
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids

    for op, value in opt.__dict__.items():
        print('{:>25}: {:<30}'.format(op, str(value)))

    backup_dir = osp.join(opt.checkpoints_dir, opt.name)
    util.mkdir(backup_dir)
    logger = Logger(
        osp.join(backup_dir, 'log' + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))
    opt.backup = backup_dir
    util.save_opt(opt, backup_dir)

    self.opt = opt
    return self.opt, logger
def __init__(self, model, dropout=None, bias_init=None, class_weights=None,
             learning_rate=1e-3, metrics=None):
    self.model = model
    self.dropout = dropout
    self.bias_init = bias_init
    self.learning_rate = learning_rate
    self.class_weights = class_weights
    self.metrics = metrics if metrics else []
    self._logger = Logger(__name__)
class Server(object):
    logger = Logger('lego.py')

    def __init__(self):
        db = Database('$2a$12$DuLWTxWJ.WdWrbIYKf7Es.')
        auth_server = AuthServer(port=1001, database=db)
        world_server = WorldServer(port=2002, database=db)
        self.logger.info('server started')
def prepare_logging(self):
    """Prepare logging."""
    root_log_dir = self.config.logging.log_dir
    if not os.path.exists(root_log_dir):
        os.makedirs(root_log_dir)
    self.log_dir = os.path.join(root_log_dir, self.current_time, "")
    self.logger = Logger(self.log_dir)
def insert_or_update_data(self, sql):
    '''
    Execute an INSERT or UPDATE statement.
    :param sql:
    :return: nothing is returned
    '''
    conn = self.get_conn()
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        conn.commit()
        Logger().info("Database update succeeded!")
    except Exception as e:
        Logger().error("Database update failed: {}".format(e))
    finally:
        conn.close()
def get_logger():
    global logger
    if logger is None:
        logger = Logger('obj_service', log2console=False, log2file=True,
                        logfile=config.YOLO_LOG_PATH).get_logger()
    return logger
def get_logger():
    global logger
    if logger is None:
        logger = Logger('gender_service', log2console=False, log2file=True,
                        logfile=config.GENDER_LOG_PATH).get_logger()
    return logger
def __init__(self, world, id, json_data):
    self.world = world
    self.id = id
    self.logger = Logger()
    self._mode = self.Mode.TRAIN

    assert self._check_action_space(), \
        Logger.print("Invalid action space, got {:s}".format(str(self.get_action_space())))

    self._enable_training = True
    self.path = Path()
    self.iter = int(0)
    self.start_time = time.time()
    self._update_counter = 0

    self.update_period = 1.0  # simulated time (seconds) before each training update
    self.iters_per_update = int(1)
    self.discount = 0.95
    self.mini_batch_size = int(32)
    self.replay_buffer_size = int(50000)
    self.init_samples = int(1000)
    self.normalizer_samples = np.inf
    self._local_mini_batch_size = self.mini_batch_size  # batch size for each worker for multiprocessing
    self._need_normalizer_update = True
    self._total_sample_count = 0

    self._output_dir = ""
    self._int_output_dir = ""
    self.output_iters = 100
    self.int_output_iters = 100

    self.train_return = 0.0
    self.test_episodes = int(0)
    self.test_episode_count = int(0)
    self.test_return = 0.0
    self.avg_test_return = 0.0

    self.exp_anneal_samples = 320000
    self.exp_params_beg = ExpParams()
    self.exp_params_end = ExpParams()
    self.exp_params_curr = ExpParams()

    # jump and use ppo_agent._load_params()
    # super() => pg_agent._load_params()
    # super() => rl_agent._load_params()
    self._load_params(json_data)

    # jump and use pg_agent._build_replay_buffer()
    # super() => rl_agent._build_replay_buffer()
    self._build_replay_buffer(self.replay_buffer_size)

    # jump and use pg_agent._build_normalizers()
    # super() => tf_agent._build_normalizers()
    self._build_normalizers()

    # jump and use rl_agent._build_bounds()
    self._build_bounds()

    # jump and use pg_agent.reset()
    self.reset()
    return
class Suite(unittest.TestSuite):
    logger = Logger().logger

    def run(self, result, debug=False):
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for index, test in enumerate(self):
            retry = getattr(test, "retry", Config.RETRY)
            if result.shouldStop:
                break
            for i in range(1, retry + 2):
                if _isnotsuite(test):
                    self._tearDownPreviousClass(test, result)
                    self._handleModuleFixture(test, result)
                    self._handleClassSetUp(test, result)
                    result._previousTestClass = test.__class__
                    if (getattr(test.__class__, '_classSetupFailed', False) or
                            getattr(result, '_moduleSetUpFailed', False)):
                        continue
                self.logger.info("Test case {}: starting attempt {}".format(test.__class__.__name__, i))
                if not debug:
                    test(result)
                else:
                    test.debug()

                if i < retry + 1:
                    # retry check - this part is ugly, improvements welcome
                    error, fail = None, None
                    fail_id = [x.get("case_id") for x in result.failures]
                    error_id = [x.get("case_id") for x in result.errors]
                    if test.case_id in fail_id:
                        fail = fail_id.index(test.case_id)
                    if test.case_id in error_id:
                        error = error_id.index(test.case_id)
                    if error is None and fail is None:
                        # neither a failure nor an error, stop retrying
                        break
                    elif error is not None:
                        self.logger.warning("Test case {}: attempt {} failed, reason: {}".format(
                            test.__class__.__name__, i, str(result.errors[error]['msg'])))
                        del result.errors[error]
                    elif fail is not None:
                        self.logger.warning("Test case {}: attempt {} failed, reason: {}".format(
                            test.__class__.__name__, i, str(result.failures[fail]['msg'])))
                        del result.failures[fail]
                    result._previousTestClass = test.__class__
                    continue

            if self._cleanup:
                self._removeTestAtIndex(index)

        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
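# Hedged usage sketch (not from the original framework): a test case carrying
# the per-case `retry` and `case_id` attributes that Suite.run() reads above,
# executed through the retrying Suite. CustomResult stands in for a result
# class whose failures/errors entries are dicts with "case_id" and "msg" keys,
# as the retry logic expects; it and place_order() are placeholders.
class TestCheckout(unittest.TestCase):
    retry = 2                      # overrides Config.RETRY for this case
    case_id = "TC_checkout_001"

    def test_checkout(self):
        self.assertTrue(place_order())   # hypothetical helper under test


suite = Suite()
suite.addTest(TestCheckout("test_checkout"))
result = suite.run(CustomResult())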
@classmethod
def setUpClass(cls):
    # initialization
    cls.response = None
    cls.check = CheckPoint()
    cls.opera = OperationExcel(cls.excel_path, 0)
    cls.getdata = GetDataFromExcel(cls.excel_path, 0)
    cls.data_list = cls.opera.data
    cls.login = Login(cls.excel_path, 0)
    cls.get_md5 = GetMD5()
    # call the shared login method
    Logger().info("Calling the shared login method")
    cls.response = cls.login.login()
    cls.uid = cls.response.get("data").get("uid")
    cls.key = cls.response.get("data").get("key")
    cls.gettime = cls.get_md5.get_time()
    cls.sign = cls.get_md5.get_sign(cls.uid + cls.gettime)
    Logger().info("[Login succeeded, uid: {}, key: {}.]".format(cls.uid, cls.key))
def run(dir, inpath):
    if not os.path.exists(dir):
        os.makedirs(dir)
    logfile = Logger(dir + '/qnrsearch.log')
    logfile.open()
    try:
        run_sieves(
            [(dnareader, {'item_limit': 0}),
             (hmmsearch, {'model_path': sys.path[0] + '/hmm_models/joint.hmm',
                          'hmmsearch_out': dir + '/hmmsearch_outj',
                          'write_only_domain': False}),
             [(hmmsearch, {'model_path': sys.path[0] + '/hmm_models/classA.hmm',
                           'hmmsearch_out': dir + '/hmmsearch_outa',
                           'write_only_domain': True}),
              (hmmsearch, {'model_path': sys.path[0] + '/hmm_models/classB.hmm',
                           'hmmsearch_out': dir + '/hmmsearch_outb',
                           'write_only_domain': True}),
              (hmmsearch, {'model_path': sys.path[0] + '/hmm_models/classC.hmm',
                           'hmmsearch_out': dir + '/hmmsearch_outc',
                           'write_only_domain': True}),
              (hmmsearch, {'model_path': sys.path[0] + '/hmm_models/classD.hmm',
                           'hmmsearch_out': dir + '/hmmsearch_outd',
                           'write_only_domain': True})],
             (sga, {'error_rate': 0.05,
                    'min_assembly_overlap': 20,
                    'min_merge_overlap': 20,
                    'resolve_small': 5,
                    'parse_output': False})],
            ['',
             dir + '/fragments.db',
             dir + '/fragments_passed.db',
             dir + '/fragments_passed_second.db',
             dir + '/clusters.db'],  # dbs
            [inpath,
             dir + '/dnareader.pfa',
             dir + '/fragments_passed.pfa',
             dir + '/fragments_passed_second.nfa',
             dir + '/clusters.nfa'],  # files
            logfile,
            level,
            0, 3)
    finally:
        logfile.close()