class Handler(object):
    def __init__(self, section, filename=None, data_results=None):
        self.logger = Logger()
        self.key = ConfigurationManager.readCuckooResultsConfig(variable='key', section=section)
        self.encapsulation = literal_eval(ConfigurationManager.readCuckooResultsConfig(variable='encapsulation', section=section))
        self.keys = list(ConfigurationManager.readCuckooResultsConfig(variable='keys', section=section).split(','))
        # Check if there are no keys
        if self.keys == ['']:
            self.keys = None
        self.subsectionskeys = {}
        if self.encapsulation:
            self.subsections = returnsubsections(self.encapsulation, section=section, subsections=[])
            for subsection in self.subsections:
                self.subsectionskeys[ConfigurationManager.readCuckooResultsConfig(variable='key', section='subsection_' + subsection)] = \
                    list(ConfigurationManager.readCuckooResultsConfig(variable='keys', section='subsection_' + subsection).split(','))
        results = None
        try:
            if data_results is not None:
                results = data_results[self.key]
            elif filename is not None:
                results = load_results(filename)[self.key]
        except Exception as e:
            self.logger.errorLogging(str(e))
        if isinstance(results, dict):
            self.dictionary = results
            self.list = None
        elif isinstance(results, list):
            self.list = results
            self.dictionary = None
        else:
            self.list = None
            self.dictionary = None
def MatchFiles(checkerFile, c1File, targetArch, debuggableMode):
    for testCase in checkerFile.testCases:
        if testCase.testArch not in [None, targetArch]:
            continue
        if testCase.forDebuggable != debuggableMode:
            continue
        # TODO: Currently does not handle multiple occurrences of the same group
        # name, e.g. when a pass is run multiple times. It will always try to
        # match a check group against the first output group of the same name.
        c1Pass = c1File.findPass(testCase.name)
        if c1Pass is None:
            Logger.fail(
                'Test case "{}" not found in the CFG file'.format(testCase.name),
                testCase.fileName,
                testCase.startLineNo,
            )
        Logger.startTest(testCase.name)
        try:
            MatchTestCase(testCase, c1Pass)
            Logger.testPassed()
        except MatchFailedException as e:
            lineNo = c1Pass.startLineNo + e.lineNo
            if e.assertion.variant == TestAssertion.Variant.Not:
                Logger.testFailed(
                    "NOT assertion matched line {}".format(lineNo),
                    e.assertion.fileName,
                    e.assertion.lineNo
                )
            else:
                Logger.testFailed(
                    "Assertion could not be matched starting from line {}".format(lineNo),
                    e.assertion.fileName,
                    e.assertion.lineNo,
                )
def validate_time_period(query_tokens):
    log = Logger().get('reportserver.manager.utilities')
    log.debug("given query_tokens:" + str(query_tokens))
    uom = None
    units = None
    for token in query_tokens:
        if '=' in token:
            uom, units = token.split('=')
            if uom in UnitOfMeasure.get_values(UnitOfMeasure):
                units = int(units)
                break
            else:
                uom = None
                units = None
    # Default if we aren't given a valid uom and units.
    # TODO: get this from a config file.
    if uom is None or units is None:
        uom = "days"
        units = 1
    log.debug("validate_time_period: " + str(uom) + ": " + str(units))
    return (uom, units)
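# A minimal usage sketch of validate_time_period() (an illustration, assuming
# the UnitOfMeasure enum accepts "hours"): a valid token is parsed, anything
# else falls back to the ("days", 1) default.
def _demo_validate_time_period():
    assert validate_time_period(['hours=6']) == ('hours', 6)
    assert validate_time_period(['bogus=6']) == ('days', 1)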
class BatchCrawler():
    MAX_DOCS_NUM = 100

    def __init__(self, database_config_path, source_name, domain, encode, request_interval):
        self.logger = Logger("crawler", domain)
        self.adapter = DocRawAdapter(database_config_path, source_name, self.logger)
        self.domain = domain
        self.encode = encode
        self.request_interval = request_interval

    def run(self):
        while True:
            count = 0
            try:
                for url_hash, url in self.adapter.load_uncrawled_docs(BatchCrawler.MAX_DOCS_NUM):
                    count += 1
                    self.logger.log("crawling url %s" % url, 2)
                    page = common_utils.page_crawl(url)
                    if page is None:
                        self.adapter.update_doc_raw_as_crawled_failed(url_hash)
                        continue
                    if self.encode != "utf-8":
                        # Python 2: re-encode pages that are not utf-8
                        page = unicode(page, self.encode).encode("utf-8")
                    self.adapter.update_doc_raw_with_crawled_page(url_hash, "utf-8", page)
                    time.sleep(float(self.request_interval))
                if count < BatchCrawler.MAX_DOCS_NUM:
                    break
            except Exception as e:
                self.logger.log("mongo error: %s" % e)
class DataQueue:
    """Check the data here and fail early; only valid data goes into the data
    queue. Validation lives in a separate script (datavalidator.py) because we
    need to enforce type constraints here -- the database will never see
    datavalidator.py."""

    def __init__(self):
        self.dataQueue = queue.Queue()
        self.dv = datavalidator.DataValidator()
        self.log = Logger().get('database.dataqueue.DataQueue')

    def insert_into_data_queue(self, value):
        if not self.dv.run_all_checks(value):
            self.log.error('--> Validation failed! Unable to add data '
                           'into data queue: ' + str(value))
            return False
        try:
            self.dataQueue.put(value)
        except queue.Full:
            # The original returned True from a finally block even on a full
            # queue; report the failure instead.
            self.log.critical('Data queue is full!')
            return False
        return True

    def get_next_item(self):
        item = self.dataQueue.get()
        self.dataQueue.task_done()
        return item

    def check_empty(self):
        return self.dataQueue.empty()
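# A minimal usage sketch of DataQueue (hypothetical payload shape, assuming
# DataValidator accepts a {table: {column: value}} dictionary):
dq = DataQueue()
if dq.insert_into_data_queue({'telnet': {'peer_ip': '10.0.0.5'}}):
    item = dq.get_next_item()  # the item just inserted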
def process_do_maintenance(self):
    """Apply the maintenance-mode configuration."""
    if self.operation.maintenance_mode:
        self.operation.render_maintenance_config()
        self.operation.cp_maintenance_config()
    self.operation.nginx.restart()
    Logger.put('Set maintenance mode to {}'.format(self.operation.maintenance_mode))
def render_vhost_conf(self):
    template = self.env.get_template('vhost.conf.j2')
    for virtual_host in self.virtual_hosts:
        nginx_config = template.render(virtual_host=virtual_host)
        virtual_host_conf = os.path.join(self.nginx.files_dir, virtual_host.conf)
        # Open in binary mode since the rendered config is encoded explicitly.
        with open(virtual_host_conf, 'wb') as dest:
            Logger.put("Generated {}".format(virtual_host_conf))
            dest.write(nginx_config.encode('utf-8'))
def addAssertion(self, new_assertion):
    if new_assertion.variant == TestAssertion.Variant.NextLine:
        if not self.assertions or \
           (self.assertions[-1].variant != TestAssertion.Variant.InOrder and
                self.assertions[-1].variant != TestAssertion.Variant.NextLine):
            Logger.fail("A next-line assertion can only be placed after an "
                        "in-order assertion or another next-line assertion.",
                        new_assertion.fileName, new_assertion.lineNo)
    self.assertions.append(new_assertion)
def parse_json(self):
    init_data = {}
    if not isinstance(self.item['data'], str):
        return False
    try:
        init_data = json.loads(self.item['data'])
    except ValueError:
        Logger.put("Received data that cannot be parsed as JSON: {}".format(self.item['data']))
        return False
    self.operation = Operation(init_data)
    return True
def cp(self, _from, _to):
    """
    cp _from _to
    @param _from str file path
    @param _to   str file path
    @return Popen#returncode, res, err
    """
    # No trailing comma here -- the original one turned the command string
    # into a one-element tuple.
    cp = 'cp -rf {} {}'.format(_from, _to)
    # DEBUG
    Logger.put(cp)
    ret_code, res, err = shell_command(cp)
    return ret_code, res, err
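# A minimal usage sketch (hypothetical paths and a hypothetical `operation`
# instance; assumes shell_command() returns the (returncode, stdout, stderr)
# triple the docstring above describes):
ret_code, res, err = operation.cp('/etc/nginx/nginx.conf', '/tmp/nginx.conf.bak')
if ret_code != 0:
    Logger.put('cp failed: {}'.format(err))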
def get_date_delta(iso_date_from, iso_date_to):
    try:
        date_from = dateutil.parser.parse(iso_date_from)
        date_to = dateutil.parser.parse(iso_date_to)
        delta = date_to - date_from
    except Exception as e:
        log = Logger().get('reportserver.manager.utilities')
        log.error("Error: " + str(e))
        delta = 0
    return str(delta)
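# A worked example of get_date_delta(): the difference comes back as the
# string form of a datetime.timedelta.
assert get_date_delta('2020-01-01T00:00:00', '2020-01-02T06:00:00') == '1 day, 6:00:00'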
def DumpPass(outputFilename, passName):
    c1File = ParseC1visualizerStream(os.path.basename(outputFilename),
                                     open(outputFilename, "r"))
    compiler_pass = c1File.findPass(passName)
    if compiler_pass:
        maxLineNo = compiler_pass.startLineNo + len(compiler_pass.body)
        lenLineNo = len(str(maxLineNo)) + 2
        curLineNo = compiler_pass.startLineNo
        for line in compiler_pass.body:
            Logger.log((str(curLineNo) + ":").ljust(lenLineNo) + line)
            curLineNo += 1
    else:
        Logger.fail("Pass \"" + passName + "\" not found in the output")
def __init__(self, parent, name, body, startLineNo):
    self.parent = parent
    self.name = name
    self.body = body
    self.startLineNo = startLineNo
    if not self.name:
        Logger.fail("C1visualizer pass does not have a name",
                    self.fileName, self.startLineNo)
    if not self.body:
        Logger.fail("C1visualizer pass does not have a body",
                    self.fileName, self.startLineNo)
    self.parent.addPass(self)
def __init__(self, parent, name, startLineNo, testArch=None):
    assert isinstance(parent, CheckerFile)
    self.parent = parent
    self.name = name
    self.assertions = []
    self.startLineNo = startLineNo
    self.testArch = testArch
    if not self.name:
        Logger.fail("Test case does not have a name",
                    self.fileName, self.startLineNo)
    self.parent.addTestCase(self)
def update_analyzers_pool():
    logger = Logger()
    try:
        global analyzers_pool
        analyzers = open("analyzers", "r")
        # flock(), not fcntl(), takes the LOCK_* constants.
        fcntl.flock(analyzers, fcntl.LOCK_EX)
        data = analyzers.read()
        fcntl.flock(analyzers, fcntl.LOCK_UN)
        analyzers.close()
        analyzers_pool = literal_eval(data)
    except Exception as e:
        info = str(e)
        logger.errorLogging(info)
class DataManager(Thread):
    """
    This is the DataManager class, it creates the database, data queue
    and the condition variable for synchronization between it, the framework
    and the plugins
    """

    def __init__(self):
        super().__init__()
        self.db = Database()
        self.db.create_default_database()
        self.q = DataQueue()
        self.condition = Condition()
        self.kill = False
        self.logger = Logger().get('database.datamanager.DataManager')

    def run(self):
        """
        This will insert all data in the queue and then once finished
        give up control of the condition variable
        """
        while not self.kill:
            self.condition.acquire()
            if self.q.check_empty():
                self.condition.wait()
            while not self.q.check_empty():
                value = self.q.get_next_item()
                Table_Insert.prepare_data_for_insertion(self.q.dv.table_schema, value)
            self.condition.notify()
            self.condition.release()

    def insert_data(self, data):
        """
        Synchronously inserts data into the database.

        :param data: A dictionary with a table name as its key and a dictionary
                     of column names and corresponding values as its value.
        """
        self.condition.acquire()
        if self.q.insert_into_data_queue(data):
            self.condition.notify()
        self.condition.release()

    def shutdown(self):
        self.kill = True
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()
        self.join()
        self.logger.debug('Data manager has shut down.')
class IpsServiceHandler:
    def __init__(self):
        self.log = Logger().get("reportserver.manager.IpsServiceHandler.py")

    def process(self, rqst, path_tokens, query_tokens):
        uom = None
        units = None
        self.log.info("processing ipaddress request:" + str(path_tokens) + str(query_tokens))
        try:
            time_period = utilities.validate_time_period(query_tokens)
            uom = time_period[0]
            units = time_period[1]
        except ValueError:
            rqst.badRequest(units)
            return
        if len(path_tokens) == 5:
            ipaddress = path_tokens[4].strip()
            self.log.debug("requested: " + str(ipaddress))
            # 'is not' compares identity, so the original "ipaddress is not ''"
            # was always true; a truthiness check covers both None and "".
            if ipaddress:
                try:
                    ipaddress = utilities.validate_ipaddress(ipaddress)
                    self.get_ips_data_by_time(rqst, ipaddress, uom, units)
                except ValueError:
                    rqst.badRequest(badIpAddress)
                    return
            else:
                self.get_ips_data_by_time(rqst, "", uom, units)
        elif len(path_tokens) == 4:
            self.get_ips_list_json(rqst, uom, units)
        else:
            rqst.badRequest()
            return

    def get_ips_data_by_time(self, rqst, ipaddress, uom, units):
        ips_manager = IpsManager()
        addressjsondata = ips_manager.get_data(ipaddress, uom, units)
        if addressjsondata is not None:
            # send response:
            rqst.sendJsonResponse(addressjsondata, 200)
        else:
            rqst.notFound()

    def get_ips_list_json(self, rqst, uom, units):
        response = "{not implemented yet.}"
        rqst.sendJsonResponse(response, 200)
class DatabaseHandler:
    def __init__(self):
        self.global_config = GlobalConfig()
        self.db_path = self.global_config['Database']['path']
        self.log = Logger().get('reportserver.dao.DatabaseHandler.DatabaseHandler')

    # Connect to the given database.
    # Defaults to the honeypot db, but another path can be passed in (mainly for testing).
    # The database needs to exist first.
    def connect(self, database_name):
        if database_name is None:
            database_name = self.db_path
        if not os.path.exists(database_name):
            self.log.error("Database does not exist in path: " + database_name)
            return None
        try:
            conn = sqlite3.connect(database_name)
        except sqlite3.OperationalError as oe:
            self.log.error("****Problem connecting to database*** at: " + database_name)
            self.log.error(oe)
        else:
            return conn

    # Query the DB and return the rows as dictionaries (JSON-ready).
    def query_db(self, query, args=(), one=False, db=None):
        cur = self.connect(db).cursor()
        cur.execute(query, args)
        r = [dict((cur.description[i][0], value)
                  for i, value in enumerate(row)) for row in cur.fetchall()]
        cur.connection.close()
        return (r[0] if r else None) if one else r

    # Unit of measure can be "weeks", "days", "hours" or "minutes".
    # Return all data from the DB within that measure of time as JSON.
    def get_json_by_time(self, portnumber, uom, units):
        begin_date_iso = dateTimeUtility.get_begin_date_iso(uom, units)
        tableName = self.global_config.get_plugin_config(portnumber)['table']
        date_time_field = self.global_config.get_db_datetime_name()
        queryString = "SELECT * FROM %s where %s >= '%s' order by id, %s" % (
            tableName, date_time_field, begin_date_iso, date_time_field)
        self.log.info("queryString is: " + str(queryString))
        results = self.query_db(queryString)
        self.log.debug("results: " + str(results))
        return results
def load_results(filename):
    results = None
    logger = Logger()
    try:
        dbfile = open(os.path.join(ANALYSIS_PATH, filename), 'r')
        data = dbfile.read()
        results = json.loads(data)
        dbfile.close()
    except IOError as ioer:
        errorNum = ioer.errno
        errorCode = errno.errorcode[errorNum]
        errorString = os.strerror(errorNum)
        errorFile = ioer.filename
        info = (errorNum, errorCode, errorString, errorFile)
        logger.errorLogging(msg=info)
    # The original never returned, although callers index into the result.
    return results
def __init__(self, peer_address, framework, instance_name):
    super().__init__()
    self.log = Logger().get('recon.ipinfoagent.IPInfoAgent')
    self.peer_address = peer_address
    self.cache = IPInfoCache()
    self.framework = framework
    self.instance_name = instance_name
def __init__(self, console_queue, analyzers, active_analyzers_dict):
    # update_analyzers_pool()
    self.logger = Logger()
    global analyzers_pool, console, active_analyzers
    analyzers_pool = analyzers
    console = console_queue
    active_analyzers = active_analyzers_dict
def send_analysis_task(self, name, subject, hashtag, length, client_id, time, client_ip):
    global console
    logger = Logger()
    cacert = None
    server_certificate = None
    server_key = None
    taskid = uuid.uuid4()
    try:
        for cert in os.listdir(SERVER_CERTIFICATE):
            if all(x in cert for x in ["pem", "server"]):
                server_certificate = os.path.join(SERVER_CERTIFICATE, cert)
            if "ca" in cert:
                cacert = os.path.join(SERVER_CERTIFICATE, cert)
            if "key" in cert:
                server_key = os.path.join(SERVER_CERTIFICATE, cert)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssl_sock = ssl.wrap_socket(
            sock=sock,
            ca_certs=cacert,
            certfile=server_certificate,
            keyfile=server_key,
            cert_reqs=ssl.CERT_REQUIRED
        )
        analyzer = self.get_analyzer(name, client_id, time, client_ip, hashtag, taskid, length)
        ssl_sock.connect((analyzer[1], ANALYZER_PORT))
        console.put("Sending identity")
        ssl_sock.send(str((taskid, name, "checksum", hashtag, length)))
        data = ssl_sock.recv()
        console.put(data)
        if data == "ready":
            ssl_sock.sendall(subject)
            data = ssl_sock.recv()
            console.put(data)
            data = ssl_sock.recv()
            console.put(data)
            ssl_sock.close()
            subjectinfo = "Subject:%s|Hashvalue:%s|Length:%s|Client:%s|Client IP:%s|Analyzer:%s|Analyzer IP:%s " % (
                name, hashtag, length, client_id, client_ip, analyzer[0], analyzer[1],
            )
            logger.infoLogging(subjectinfo)
    except Exception as e:
        info = str(e)
        console.put(info)
        logger.errorLogging(info)
class InitServer:
    def __init__(self, console_queue, analyzers):
        self.logger = Logger()
        global available_servers_address, console
        available_servers_address = analyzers
        console = console_queue

    def run(self):
        global console
        try:
            console.put(str("Starting up on %s port %s" % server_address))
            server = SocketServer.ThreadingTCPServer(server_address, TCPHandler)
            server.serve_forever()
        except Exception as e:
            info = str(e)
            console.put(info)
            self.logger.errorLogging(info)
def __init__(self, peer_address, framework, session):
    super().__init__()
    self.config = GlobalConfig()
    self.framework = framework
    self.fs_sock = self.config['Framework']['p0f.fs_sock']
    self.log = Logger().get('recon.p0fagent.P0fAgent')
    self.peer_address = peer_address
    self.session = session
def setUp(self):
    TestCase.setUp(self)
    self.logger = Logger(output_dir=test_logdir)
    self.protocol = MockProtocol('aim', 'digsby03')
    self.account = MockAccount('aim', 'digsby03')
    self.self_buddy = MockBuddy('digsby03', self.protocol)
    self.buddy = MockBuddy('digsby01', self.protocol)
    self.convo = MockConversation(buddy=self.buddy, ischat=False, protocol=self.protocol)
def __processLine(line, lineNo, prefix, fileName):
    """ This function is invoked on each line of the check file and returns a triplet
        which instructs the parser how the line should be handled. If the line is
        to be included in the current check group, it is returned in the first
        value. If the line starts a new check group, the name of the group is
        returned in the second value. The third value indicates whether the line
        contained an architecture-specific suffix.
    """
    if not __isCheckerLine(line):
        return None, None, None

    # Lines beginning with 'CHECK-START' start a new test case.
    # We currently only consider the architecture suffix in "CHECK-START" lines.
    for debuggable in [True, False]:
        for arch in [None] + archs_list:
            startLine = __extractLine(prefix + "-START", line, arch, debuggable)
            if startLine is not None:
                return None, startLine, (arch, debuggable)

    # Lines starting only with 'CHECK' are matched in order.
    plainLine = __extractLine(prefix, line)
    if plainLine is not None:
        return (plainLine, TestAssertion.Variant.InOrder, lineNo), None, None

    # 'CHECK-NEXT' lines are in-order but must match the very next line.
    nextLine = __extractLine(prefix + "-NEXT", line)
    if nextLine is not None:
        return (nextLine, TestAssertion.Variant.NextLine, lineNo), None, None

    # 'CHECK-DAG' lines are no-order assertions.
    dagLine = __extractLine(prefix + "-DAG", line)
    if dagLine is not None:
        return (dagLine, TestAssertion.Variant.DAG, lineNo), None, None

    # 'CHECK-NOT' lines are no-order negative assertions.
    notLine = __extractLine(prefix + "-NOT", line)
    if notLine is not None:
        return (notLine, TestAssertion.Variant.Not, lineNo), None, None

    # 'CHECK-EVAL' lines evaluate a Python expression.
    evalLine = __extractLine(prefix + "-EVAL", line)
    if evalLine is not None:
        return (evalLine, TestAssertion.Variant.Eval, lineNo), None, None

    Logger.fail("Checker assertion could not be parsed: '" + line + "'",
                fileName, lineNo)
def FindCheckerFiles(path):
    """ Returns a list of files to scan for check annotations in the given path.
        Path to a file is returned as a single-element list, directories are
        recursively traversed and all '.java' and '.smali' files returned.
    """
    if not path:
        Logger.fail("No source path provided")
    elif os.path.isfile(path):
        return [path]
    elif os.path.isdir(path):
        foundFiles = []
        for root, dirs, files in os.walk(path):
            for file in files:
                extension = os.path.splitext(file)[1]
                if extension in [".java", ".smali"]:
                    foundFiles.append(os.path.join(root, file))
        return foundFiles
    else:
        Logger.fail("Source path \"" + path + "\" not found")
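# A minimal usage sketch (the path is hypothetical): a file path is returned
# as-is in a one-element list, a directory is walked recursively.
checker_files = FindCheckerFiles('./test')  # all *.java and *.smali files under ./test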
def __init__(self, data_adapter_config_path, source_name, encode="utf-8", parse_try_limit=3):
    self.logger = Logger("spider", source_name)
    self.doc_raw_adapter = DocRawAdapter(data_adapter_config_path, source_name, self.logger)
    self.data_raw_adapter = DataRawAdapter(data_adapter_config_path, source_name, self.logger)
    self.image_store_adapter = ImageStoreAdapter(data_adapter_config_path, self.logger)
    self.source_name = source_name
    self.encode = encode
    self.parse_try_limit = parse_try_limit
    self.exploring_times = 0
def validate_time_period(query_tokens):
    log = Logger().get('reportserver.manager.utilities')
    log.debug("given query_tokens:" + str(query_tokens))
    uom = None
    units = None
    for token in query_tokens:
        if '=' in token:
            uom, units = token.split('=')
            if uom in UnitOfMeasure.get_values(UnitOfMeasure):
                units = int(units)
                break
            else:
                uom = None
                units = None
    return (uom, units)
def __init__(self, working_dir: str = None, testcases: list = None,
             runner: str = None, logger: Logger = None, testset_name=""):
    """Testset constructor."""
    self.working_dir = working_dir if working_dir is not None else ""
    # A mutable default ([]) would be shared across instances; use a None
    # sentinel and create a fresh list per instance instead.
    self.testcases = testcases if testcases is not None else []
    self.logger = Logger() if logger is None else logger
    self.testset_name = testset_name
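# A short sketch of why the None sentinel above matters (assuming the
# enclosing class is named Testset, as the docstring suggests): with a mutable
# default ("testcases: list = []") every Testset built without arguments would
# share a single list object.
a = Testset(testset_name="a")
b = Testset(testset_name="b")
a.testcases.append("case1")
assert b.testcases == []  # independent lists thanks to the sentinel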
import os
import json
import logging

from flask import Flask, request, abort, jsonify

from teos import HOST, PORT, LOG_PREFIX
from common.logger import Logger
from common.appointment import Appointment
from common.constants import HTTP_OK, HTTP_BAD_REQUEST, HTTP_SERVICE_UNAVAILABLE, LOCATOR_LEN_HEX

# ToDo: #5-add-async-to-api
app = Flask(__name__)
logger = Logger(actor="API", log_name_prefix=LOG_PREFIX)


class API:
    """
    The :class:`API` is in charge of the interface between the user and the tower. It handles and serves user
    requests.

    Args:
        inspector (:obj:`Inspector <teos.inspector.Inspector>`): an ``Inspector`` instance to check the correctness
            of the received data.
        watcher (:obj:`Watcher <teos.watcher.Watcher>`): a ``Watcher`` instance to pass the requests to.
    """

    def __init__(self, inspector, watcher):
        self.inspector = inspector
        self.watcher = watcher
class BackFillToExcel:
    """Write test results back into the Excel workbook."""

    def __init__(self):
        # Logger instance
        self._log = Logger('API test result write-back').get_logger()
        # Absolute path of the Excel file
        self._path = constant.api_result_excel_path
        # Create the Excel file if it does not exist yet
        if not os.path.exists(self._path):
            BackupOrNewFile().create_result_file()
        # Load the workbook and switch to the target sheet (a WorkSheet object)
        self._wb = openpyxl.load_workbook(self._path)
        self._ws = self._wb[self._wb.sheetnames[0]]
        self._log.debug('Loaded result file: {}'.format(os.path.basename(self._path)))
        self._log.debug('Located write-back sheet: {}'.format(self._wb.sheetnames[0]))

    def save_excel(self):
        """Save all results written back for the current case.

        :return: None
        """
        try:
            # self._merge_cells_before_save()
            self._wb.save(self._path)
        except PermissionError as e:
            self._log.error('Save failed! The Excel file is open; close it and rerun the test: {}'.format(e))
        else:
            self._log.debug('Saved the current case result.')

    def fill_api_name(self, api_name):
        """Write back the API name.

        :param api_name: API name
        :return: None
        """
        self._ws.cell(self._ws.max_row, API_NAME_COL).value = api_name
        self._log.debug('Wrote back API name: {}'.format(api_name))

    def fill_api_url(self, api_url):
        """Write back the API URL.

        :param api_url: URL
        :return: None
        """
        self._ws.cell(self._ws.max_row, API_URL_COL).value = api_url
        self._log.debug('Wrote back API URL: {}'.format(api_url))

    def fill_case_number(self, case_number):
        """Write back the case number.
        Note: this is called first in the base class's setUp to fix the
        write-back row, so the other fill methods can target the current
        max row.

        :param case_number: case number
        :return: None
        """
        # Called first, hence max_row + 1
        self._ws.cell(self._ws.max_row + 1, CASE_NUMBER_COL).value = case_number
        self._log.debug('Wrote back case number {}; current write-back row is {}'.format(
            case_number, self._ws.max_row))

    def fill_case_name(self, case_name):
        """Write back the case name.

        :param case_name: case name
        :return: None
        """
        self._ws.cell(self._ws.max_row, CASE_NAME_COL).value = case_name
        self._log.debug('Wrote back case name: {}'.format(case_name))

    def fill_judgement_result(self, result=1):
        """Write back the verdict.

        :param result: 0 failure, 1 success
        :return: None
        """
        if result:
            self._ws.cell(self._ws.max_row, JUDGEMENT_RESULT_COL).value = 'SUCCESS'
            self._log.debug('Wrote back default verdict: SUCCESS')
        else:
            self._ws.cell(self._ws.max_row, JUDGEMENT_RESULT_COL).value = 'FAILURE'
            self._log.debug('Assertion or execution failed; verdict changed to FAILURE')
            self._set_color_if_failure(self._ws.max_row)

    def fill_excepted(self, excepted_result):
        """Write back the expected result.

        :param excepted_result: expected result
        :return: None
        """
        self._ws.cell(self._ws.max_row, EXPECTED_RESULT_COL).value = str(excepted_result)
        self._log.debug('Wrote back expected result: {}'.format(excepted_result))

    def fill_compare_result(self, compare_result):
        """Write back the comparison result.

        :param compare_result: comparison result list
        :return: None
        """
        if not compare_result:
            self._ws.cell(self._ws.max_row, COMPARE_RESULT_COL).value = 'Results match'
        else:
            self._ws.cell(self._ws.max_row, COMPARE_RESULT_COL).value = str(compare_result)
        self._log.debug('Wrote back comparison result: {}'.format(compare_result))

    def fill_response(self, response):
        """Write back the response JSON.

        :param response: JSON
        :return: None
        """
        self._ws.cell(self._ws.max_row, RESPONSE_COL).value = str(response)
        self._log.debug('Wrote back response: {}'.format(response))

    def fill_test_data(self, curr_case_data):
        """Write back the test data of the current case.

        :param curr_case_data: test data
        :return: None
        """
        self._ws.cell(self._ws.max_row, CASE_DATA_COL).value = str(curr_case_data)
        self._log.debug('Wrote back test data: {}'.format(curr_case_data))

    def _set_color_if_failure(self, row):
        """Fill the background of a failed case row in red.

        :param row: row number
        :return: None
        """
        fill = PatternFill(fill_type='solid', fgColor="FF0000")
        for col in range(API_NAME_COL, CASE_DATA_COL + 1):
            self._ws.cell(row, col).fill = fill
        self._log.debug('Marked the failed case in red!')

    def _merge_cells_before_save(self):
        """Called before saving to merge cells (case name and URL columns).

        :return: None
        """
        # Determine the first row of the merge range
        min_row_ = self._ws.max_row - self._ws.cell(self._ws.max_row, CASE_NUMBER_COL).value + 1
        # Merge the cells
        self._ws.merge_cells(start_row=min_row_, start_column=API_NAME_COL,
                             end_row=self._ws.max_row, end_column=API_NAME_COL)
        self._ws.merge_cells(start_row=min_row_, start_column=API_URL_COL,
                             end_row=self._ws.max_row, end_column=API_URL_COL)
        # Center vertically
        align = Alignment(vertical='center')
        self._ws.cell(min_row_, API_NAME_COL).alignment = align
        self._ws.cell(min_row_, API_URL_COL).alignment = align
class DBRedis:
    """Connect to redis and perform operations.

    :param db_: database index, defaults to the first one (0)
    """

    def __init__(self, db_=0):
        # Logger instance
        self._log = Logger('REDIS').get_logger()
        # Read the redis configuration
        self._redis_conf = OperateConfig(config_common_path)
        try:
            pool = redis.ConnectionPool(
                host=self._redis_conf.get_str('redis', 'host'),
                port=self._redis_conf.get_str('redis', 'port'),
                password=self._redis_conf.get_str('redis', 'auth'),
                db=db_,
                decode_responses=True)
            self._conn = redis.StrictRedis(connection_pool=pool)
            self._log.info('Connected to REDIS, db({})'.format(db_))
        except redis.exceptions.RedisError as e:
            self._log.error('REDIS connection failed: {}'.format(e))

    def conn(self):
        """Return the connection instance so other redis-library methods can
        be called directly.

        :return: connection instance
        """
        return self._conn

    def set_kv(self, key_, value_, ex_=None, px_=None, nx_=False, xx_=False):
        """Set a value in Redis (create if missing, overwrite otherwise) and
        return it. For values that the business requires to be strings, add
        double quotes, e.g. '"123456"'.

        :param key_: key
        :param value_: value
        :param ex_: expiry time (seconds)
        :param px_: expiry time (milliseconds)
        :param nx_: if True, only set when the name does not exist
        :param xx_: if True, only set when the name already exists
        :return: the value that was set
        """
        self._conn.set(key_, value_, ex=ex_, px=px_, nx=nx_, xx=xx_)
        self._log.info('Set {}={}'.format(key_, value_))
        return value_

    def get_v(self, key_):
        """Get the value of the given key.

        :param key_: key
        :return: value
        """
        value = self._conn.get(key_)
        if value:
            self._log.info('Got {}={}'.format(key_, value))
            return value
        else:
            self._log.error('Key {} does not exist'.format(key_))

    def del_kv(self, key_):
        """Delete the given key-value pair.

        :param key_: key
        :return:
        """
        value = self._conn.get(key_)
        if value:
            self._conn.delete(key_)
            self._log.info('Deleted {}={}'.format(key_, value))
        else:
            self._log.info('The key to delete does not exist')
class DBOracle:
    """Operate on ORACLE."""

    def __init__(self):
        # Logger instance
        self._log = Logger("ORACLE").get_logger()
        # Read the database configuration
        self._db_config = OperateConfig(config_common_path)
        self._conn_str = '{}/{}@{}:{}/{}'.format(
            self._db_config.get_str('oracle', 'username'),
            self._db_config.get_str('oracle', 'password'),
            self._db_config.get_str('oracle', 'host'),
            self._db_config.get_str('oracle', 'port'),
            self._db_config.get_str('oracle', 'database'))
        try:
            self._conn = cx_Oracle.connect(self._conn_str)
            self._log.info('Connected to the database')
        except cx_Oracle.Error as e:
            self._log.error('Database connection failed: {}'.format(e))

    @property
    def conn(self):
        """Return the connection instance so other cx_Oracle methods can be
        used directly.

        :return: connection instance
        """
        return self._conn

    def disconnect(self):
        """Close the connection.

        :return: None
        """
        self._conn.close()
        self._log.info('Disconnected from the database')

    def __del__(self):
        self.disconnect()

    def select_all(self, sql_string: str):
        """Run a SELECT statement.

        :param sql_string: sql statement
        :return: list of tuples
        """
        c = self._conn.cursor()
        self._log.info('Running query: %s' % sql_string)
        x = c.execute(sql_string)
        # Fetch the whole result set (a list of tuples); rows can be indexed,
        # e.g. datalist[0][1]
        datalist = x.fetchall()
        self._log.info('Query results:')
        for data in datalist:
            self._log.debug('Row {}: {}'.format(datalist.index(data) + 1, data))
        c.close()
        return datalist

    def select_one(self, sql_string: str):
        """Run a SELECT statement.

        :param sql_string: sql statement
        :return: a single field value or a single record
        """
        c = self._conn.cursor()
        self._log.info('Running query: %s' % sql_string)
        x = c.execute(sql_string)
        # Fetch a single field value or a single record
        data = x.fetchone()
        self._log.debug('Query result: {}'.format(data))
        c.close()
        # A one-column row collapses to the bare value (the original tested
        # len(data[0]), i.e. the length of the first field, by mistake).
        if len(data) == 1:
            return data[0]
        else:
            return data

    def execute_sql(self, sql_string: str):
        """Run an INSERT, UPDATE or DELETE statement.

        :param sql_string: sql statement
        :return: None
        """
        try:
            c = self._conn.cursor()
            self._log.info('Running %s statement: %s' % (sql_string.split()[0], sql_string))
            c.execute(sql_string)
            self._conn.commit()
            c.close()
        except cx_Oracle.Error as e:
            self._log.error('Execution failed: %s' % str(e))
            self._conn.rollback()
            self._log.error('Rolled back the operation')

    def exec_function(self, function_name: str, *parameters, **keyword_parameters):
        """Call the named function, optionally with parameters.

        :param function_name: function name
        :param parameters: positional parameters
        :param keyword_parameters: keyword parameters
        :return: None
        """
        try:
            c = self._conn.cursor()
            self._log.info('Calling function: {}'.format(function_name))
            c.callfunc(function_name, *parameters, **keyword_parameters)
            c.close()
        except cx_Oracle.Error as e:
            self._log.error('Execution failed: %s' % str(e))
            self._conn.rollback()
            self._log.error('Rolled back the operation')

    def exec_process(self, process_name, *parameters, **keyword_parameters):
        """Call the named stored procedure, optionally with parameters.

        :param process_name: procedure name
        :param parameters: positional parameters
        :param keyword_parameters: keyword parameters
        :return: None
        """
        try:
            c = self._conn.cursor()
            self._log.info('Calling procedure: {}'.format(process_name))
            c.callproc(process_name, *parameters, **keyword_parameters)
            c.close()
        except cx_Oracle.Error as e:
            self._log.error('Execution failed: %s' % str(e))
            self._conn.rollback()
            self._log.error('Rolled back the operation')
# Copyright (c) 2020, Yasin Hasanian
# See license.txt
#
"""
Base interface to be inherited by apps
"""

import core.decomposer as core
import common.constants as constants
import utils

from common.logger import Logger
from pathlib import Path

log = Logger()


class BaseManager(object):

    def __init__(self):
        self.decomposer = None
        self._lights = {'src_lights': set(), 'trg_lights': set()}
        self._radius = 1000

    @property
    def lights(self):
        return self._lights

    @lights.setter
import ddt
import unittest
import json

from common.logger import Logger
from common.constants import Constants
from common.read_excle import ReadExl
from common.request_base import RequestBase
from common.config_manager import ConfigManager
from data_structure.clearing_all.clearing_keeping_accounts import ClearingKeepingAccounts
from data_structure.precodition_all.precondition_keeping_accounts import PreconditionKeepingAccounts
from data_structure.handle import Handle

log = Logger('MachPayDispatch').get_log()
exa_and_approve_list = ReadExl(Constants.EXL.PROMOTION, sheet=0).obtain_data()
# The Chinese strings below are case-name keys that must match the Excel data:
# abnormal flow with multiple reserve accounts / with no reserve account /
# with an insufficient reserve-account balance, and abnormal-parameter cases.
flow_error_has_Promotion = ReadExl.screen_case('活动记账异常流程测试有多个准备金账户', exa_and_approve_list)
flow_error_none_Promotion = ReadExl.screen_case('活动记账异常流程测试没有准备金账户', exa_and_approve_list)
flow_error_remain_amt_Promotion = ReadExl.screen_case('活动记账异常流程测试备用金账户余额不足', exa_and_approve_list)
unusual_parameter = ReadExl.screen_case('活动记账异常调用测试用例', exa_and_approve_list)


@ddt.ddt
class MachPromotionUnusual(unittest.TestCase):
    """
    Promotion posting test cases: <br>
    1>> abnormal-field tests for promotion amount posting: test_unusual_parameter <br>
    2>> abnormal-flow tests for promotion amount posting: test_error_flow*
        geom for geom in list(edge_gdf['geometry'])
        if isinstance(geom, LineString)
    ])
    log.info(
        f'found {real_edge_count - len(edges_within)} edges of {real_edge_count} outside noise data extent'
    )
    # set noise attributes of edges within the data extent to default values (no noise)
    for edge in edges_within.itertuples():
        graph.es[getattr(edge, E.id_ig.name)][E.noises.value] = {}
        graph.es[getattr(edge, E.id_ig.name)][E.noise_source.value] = ''


if (__name__ == '__main__'):
    log = Logger(printing=True, log_file='noise_graph_update.log', level='debug')
    in_graph_file = 'data/hma.graphml'
    out_graph_file = 'out_graph/hma.graphml'
    data_extent_file = 'data/HMA.geojson'
    noise_csv_dir = 'out_csv/'
    data_extent: Polygon = geom_utils.project_geom(
        gpd.read_file(data_extent_file)['geometry'][0])
    graph = ig_utils.read_graphml(in_graph_file, log)
    set_default_and_na_edge_noises(graph, data_extent, log)
    noise_graph_update(graph, noise_csv_dir, log)
    ig_utils.export_to_graphml(graph, out_graph_file)
import logging
import time

from selenium.webdriver.common.action_chains import ActionChains

from base.selenium_driver import SeleniumDriver
from common.logger import Logger
from pages.taq_tb_page import TbTaq
from pages.taq_ycjy_page import YcjyTaq

logger = Logger(logger='choosesupects').getlog()
logger.setLevel(level=logging.INFO)


class ChoosePeople(SeleniumDriver):
    """Select a case or a suspect."""

    # Loop through the proposal pages looking for an arrest case that has suspects
    def find_avaibale_tbaj(self):
        """On the arrest/prosecution proposal page, pick a case with suspects
        from the case list; page forward when the current page has no usable
        case, at most three times."""
        taq = TbTaq()  # instantiate the arrest proposal page
        nextpagetime = 0  # number of page turns so far
        self.spsname = None
        flag = False  # whether a suspect has been found
        # Page forward at most 3 times; if no selectable case is found after
        # that, check the data.
        while nextpagetime <= 2 and flag == False:
            ajtablelist = self.driver.find_element_by_id('caseGrid-table')
            table_aj_rows = ajtablelist.find_elements_by_tag_name('tr')
            ajnum = len(table_aj_rows) - 1  # number of cases in the list
from datebase.updateqzzt import UpdateQzzt
from fileprocess.savedata import SaveResultToFile
from zfxtyw.addaj import AddAJ
from zfxtyw.choose_qzcs_type import ChooseQzcs
from zfxtyw.choose_supect import ChoosePeople
from pages.home_page import *
import logging
from common.logger import Logger
from zfxtyw.chooseywname import ChooseXtName
from zfxtyw.fill_ajxx import FillElementValue
from zfxtyw.getajid import GetAjidFromUrl
from zfxtyw.jzxt_uploadfile import UploadFile
from zfxtyw.login import LoginPageTest
from zfxtyw.start_yw import StartYw

logger = Logger(logger='tb-qlc').getlog()
logger.setLevel(level=logging.INFO)


class TbTest(unittest.TestCase):
    """End-to-end arrest-proposal flow test."""

    @classmethod
    def setUpClass(cls):
        browser = BrowserDriver(cls)
        # Log in via the public-security or political-legal side; the browser
        # choice is configured in the ini file.
        cls.driver = browser.open_browser(browser, 'ga')

    # # def switch_window(self, number):
def __init__(self): self.logger = Logger(logger="Assertions").getlog()
class Assertions:
    def __init__(self):
        self.logger = Logger(logger="Assertions").getlog()

    def assert_code(self, code, expect_code):
        """
        Verify the response code.
        :param code: response code
        :param expect_code: expected response code
        :return:
        """
        try:
            assert code == expect_code
            return True
        except Exception:
            self.logger.error("status_code error! expect_code:%s,but code:%s" % (expect_code, code))
            raise

    def assert_msg(self, body, msg, expect_msg):
        """
        Verify a field of the response body.
        :param body: response body
        :param msg: field name
        :param expect_msg: expected field value
        :return:
        """
        try:
            assert body[msg] == expect_msg
            return True
        except Exception:
            self.logger.error("msg error! expect_msg:%s,but msg:%s" % (expect_msg, msg))
            raise

    def assert_in_text(self, body, expect_msg):
        """
        Verify that the response body contains an expected string.
        :param body: response body
        :param expect_msg: expected string
        :return:
        """
        try:
            text = json.loads(body)
            assert expect_msg in text
            return True
        except Exception:
            self.logger.error("expect_msg is not included! ")
            raise

    def assert_time(self, time, expect_time):
        """
        Verify that the response time does not exceed the expected time, in ms.
        :param time:
        :param expect_time:
        :return:
        """
        try:
            assert time < expect_time
            return True
        except Exception:
            self.logger.error("response_time > expect_time,response_time:%s" % time)
            raise
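# A minimal usage sketch of Assertions (hypothetical response values); each
# method returns True on success and logs before re-raising on failure:
assertions = Assertions()
assertions.assert_code(200, 200)
assertions.assert_msg({"status": "ok"}, "status", "ok")
assertions.assert_time(350, 1000)  # 350 ms is under the 1000 ms budget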
class RspGame(object):
    """Rock-paper-scissors: a random integer in [1,3] decides the bot's move
    (1: rock; 2: scissors; 3: paper). The user-facing chat strings stay in
    Chinese because they must match and address chat input."""

    def __init__(self, game_num):
        self.log = Logger()
        self.game_all_num = game_num
        self.player_score = 0
        self.com_score = 0
        self.player_name = ''
        self.img_dir = ['rock', 'scissors', 'paper', 'emoticon']
        # self.rsp_img = [os.path.join(GAME_IMAGE_PATH, 'rock_1.jpg'),
        #                 os.path.join(GAME_IMAGE_PATH, 'scissors_1.jpg'),
        #                 os.path.join(GAME_IMAGE_PATH, 'paper_1.jpg')]
        # print(self.rsp_img)
        self.draw_msg = " 平局了,继续来~"
        self.fail_msg = " 我输了 "
        self.win_msg = " 我赢了 "
        self.over_msg = [" 游戏结束,你输了", " 游戏结束,恭喜你赢了"]
        self.msg_code = {"石头": 0, "剪刀": 1, "布": 2}

    def start(self, player_name):
        self.player_score = 0
        self.com_score = 0
        self.player_name = player_name

    def random_img(self, random_num):
        list_dir = os.listdir(
            os.path.join('resources', 'game', self.img_dir[random_num]))
        path = choice(list_dir)
        self.log.info('choose:-->{}'.format(path))
        return os.path.join('resources', 'game', self.img_dir[random_num], path)

    def get_result(self, winer):
        """Update the score and report the state of this match."""
        if winer == 1:
            self.player_score += 1
            if self.player_score == (self.game_all_num + 1) / 2:
                return 1, '@' + self.player_name + self.over_msg[1]
            else:
                return 0, '@' + self.player_name + self.fail_msg
        elif winer == -1:
            self.com_score += 1
            if self.com_score == (self.game_all_num + 1) / 2:
                return 1, '@' + self.player_name + self.over_msg[0]
            else:
                return 0, '@' + self.player_name + self.win_msg

    def play(self, msg):
        """
        Returns [a, b, c]:
        a -- whether the game is over (1: over, 0: not over)
        b -- the reply for this move
        c -- the image for this move
        :param msg:
        :return:
        """
        self.log.info('play:{}'.format(msg))
        real_msg = msg.text.split()
        valid_msg = real_msg[len(real_msg) - 1]
        self.log.debug('commond:{}'.format(valid_msg))
        if str.find(valid_msg, "不玩") != -1 or str.find(valid_msg, "退出") != -1:
            return 1, '@' + self.player_name + " 虽然半途而废不怎么好听,但有时候放弃也是一种聪明的选择", self.random_img(3)
        elif valid_msg != "石头" and valid_msg != "剪刀" and valid_msg != "布":
            return 0, '@' + self.player_name + " 你这是要跟我石头剪刀布吗?", self.random_img(3)
        random_num = random.randint(1, 3) - 1
        self.log.debug('random_num:{}'.format(random_num))
        self.log.debug('msg_code:{}'.format(self.msg_code[valid_msg]))
        # 1: player wins, -1: bot wins, 0: draw
        winer = (random_num - self.msg_code[valid_msg] + 4) % 3 - 1
        if winer == 0:
            return 0, '@' + self.player_name + self.draw_msg, self.random_img(random_num)
        else:
            can, res_msg = self.get_result(winer)
            return can, res_msg, self.random_img(random_num)
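# A quick check of the winner formula above: moves are 0 rock, 1 scissors,
# 2 paper; (bot - player + 4) % 3 - 1 yields 1 when the player wins, -1 when
# the bot wins, and 0 on a draw.
assert (1 - 0 + 4) % 3 - 1 == 1    # bot scissors vs player rock -> player wins
assert (0 - 1 + 4) % 3 - 1 == -1   # bot rock vs player scissors -> bot wins
assert (2 - 2 + 4) % 3 - 1 == 0    # paper vs paper -> draw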
    else:
        raise NameError('No Matching')


if __name__ == '__main__':
    result_parent = './result/'
    result_child = '/cifar10_ini0.01_wgans_maxgp0.1_relu_max0.9_0.99_bs64_lr0.0001_0.0001_128fres2k3act0upconv_128fres2k3act0downconv_sphz100/'
    result_dirs = os.listdir(result_parent)
    Data = {}
    for result_dir in sorted(result_dirs):
        dir_splits = result_dir.split('_')
        dir_dict = dict(zip(keys_, dir_splits[0:7]))
        logger = Logger()
        test_dir = result_parent + result_dir + result_child
        logger.set_dir(test_dir)
        x_vals, y_vals = logger.load_and_return()
        y_vals = np.array(y_vals)
        name = get_name(dir_dict)
        Data[dir_splits[0]] = {'info': dir_dict, 'name': name,
                               'y_vals': y_vals, 'x_vals': x_vals}
    plt_start, plt_stop = 0, 120000
    # this list is used to decide the plot targets and their order
    ordered_keys = sorted(Data.keys())
    figureNo = 1
    figsize = (8, 6)
def send_email(self, email_subject):
    """Send the email.

    :param email_subject: email subject
    :return:
    """
    # Logger instance
    log = Logger("Send email").get_logger()
    s_mtp = self._login_mailbox()
    log.info('Logged into mailbox: %s' % self._from_man)
    report_path_filename = self._get_report_path()
    log.info('Got test report path: %s' % report_path_filename)
    email_content = self._get_report_html()
    log.info('Parsed the HTML email body')
    # Declare a multipart email with attachments and set the headers
    log.info('Setting email headers...')
    msg_root = MIMEMultipart('related')
    msg_root['from'] = self._from_man
    msg_root['to'] = ','.join(self._to_man)
    msg_root['Subject'] = Header(email_subject + ' at ' + asctime(), 'utf-8')
    # Set the HTML body
    log.info('Setting the HTML body...')
    msg = MIMEText(email_content, 'html', 'utf-8')
    msg_root.attach(msg)
    # Attach the HTML test report
    log.info('Attaching the HTML report...')
    report = MIMEText(
        open(report_path_filename, 'rb').read(), 'base64', 'utf-8')
    report.add_header('content-disposition', 'attachment', filename=self._report_name)
    msg_root.attach(report)
    # Attach the API test result Excel file
    if self._result_file:
        log.info('Attaching the EXCEL file...')
        excel = MIMEApplication(
            open(constant.api_result_excel_path, 'rb').read())
        excel.add_header(
            'Content-Disposition', 'attachment',
            filename=constant.api_result_excel_path.split('/')[1])
        msg_root.attach(excel)
    # Send
    log.info('Sending email to: %s' % ','.join(self._to_man))
    try:
        s_mtp.sendmail(self._from_man, self._to_man, msg_root.as_string())
    except smtplib.SMTPException as err:
        log.error('Error sending email: ' + str(err))
    else:
        log.info('Email sent successfully')
    finally:
        s_mtp.quit()
        log.info('Logged out of the mailbox')
class DBMySql:
    """Operate on MYSQL."""

    def __init__(self):
        # Logger instance
        self._log = Logger("MYSQL").get_logger()
        # Read the database configuration
        self._db_config = OperateConfig(config_common_path)
        try:
            self._conn = pymysql.connect(
                user=self._db_config.get_str('mysql', 'username'),
                password=self._db_config.get_str('mysql', 'password'),
                host=self._db_config.get_str('mysql', 'host'),
                # pymysql expects an int port, not the raw config string
                port=int(self._db_config.get_str('mysql', 'port')),
                database=self._db_config.get_str('mysql', 'database'))
            self._log.info('Connected to the database')
        except pymysql.Error as e:
            self._log.error('Database connection failed: {}'.format(e))

    @property
    def conn(self):
        """Return the connection instance so other PyMySql methods can be
        used directly.

        :return: connection instance
        """
        return self._conn

    def disconnect(self):
        """Close the connection.

        :return: None
        """
        self._conn.close()
        self._log.info('Disconnected from the database')

    def __del__(self):
        self.disconnect()

    def select_all(self, sql_string: str):
        """Run a SELECT statement.

        :param sql_string: sql statement
        :return: list of tuples
        """
        c = self._conn.cursor()
        self._log.info('Running query: %s' % sql_string)
        # pymysql's execute() returns a row count, so fetch from the cursor
        c.execute(sql_string)
        datalist = c.fetchall()
        self._log.info('Query results:')
        for data in datalist:
            self._log.debug('Row {}: {}'.format(datalist.index(data) + 1, data))
        c.close()
        return datalist

    def select_one(self, sql_string: str):
        """Run a SELECT statement.

        :param sql_string: sql statement
        :return: a single field value or a single record
        """
        c = self._conn.cursor()
        self._log.info('Running query: %s' % sql_string)
        c.execute(sql_string)
        # Fetch a single field value or a single record
        data = c.fetchone()
        self._log.debug('Query result: {}'.format(data))
        c.close()
        # A one-column row collapses to the bare value
        if len(data) == 1:
            return data[0]
        else:
            return data

    def execute_sql(self, sql_string: str):
        """Run an INSERT, UPDATE or DELETE statement.

        :param sql_string: sql statement
        :return: None
        """
        try:
            c = self._conn.cursor()
            self._log.info('Running %s statement: %s' % (sql_string.split()[0], sql_string))
            c.execute(sql_string)
            self._conn.commit()
            c.close()
        except pymysql.Error as e:
            self._log.error('Execution failed: %s' % str(e))
            self._conn.rollback()
            self._log.error('Rolled back the operation')
class BackupOrNewFile:
    """Back up or create the test result Excel file.
    Before a full test run, call backup_result_file to back up the previous
    file, then call create_result_file to create the file for this run."""

    def __init__(self):
        # Logger instance
        self._log = Logger('Back up or create API test result file').get_logger()
        # Absolute path of the Excel file
        self._path = constant.api_result_excel_path

    def backup_result_file(self):
        """Back up the Excel file before each full test run.
        Call this separately before the run, then create_result_file to create
        the new Excel file.

        :return: None
        """
        if os.path.exists(self._path):
            current_time = strftime('%Y-%m-%d %H-%M-%S')
            backup_file_name = '{} backup at {}.xlsx'.format(
                self._path.split('.')[0], current_time)
            move(self._path, backup_file_name)
            self._log.info('Backed up result file: {}'.format(backup_file_name))
        else:
            self._log.info('No file to back up.')

    def create_result_file(self):
        """Create a new Excel file; call after each backup.

        :return: None
        """
        if not os.path.exists(self._path):
            wb = openpyxl.Workbook()
            ws = wb.active
            ws.title = 'results'
            self._set_row_one_values(ws)
            self._set_row_one_width_and_height(ws)
            self._set_row_one_styles(ws)
            # Freeze the header row
            ws.freeze_panes = 'A2'
            wb.save(self._path)
            self._log.info('Created result file: {}'.format(self._path))
        else:
            self._log.info('{} already exists; back it up first.'.format(
                os.path.basename(self._path)))

    @staticmethod
    def _set_row_one_values(ws):
        """Set the header row values.

        :param ws: worksheet object
        :return: None
        """
        ws.cell(1, API_NAME_COL).value = 'API name'
        ws.cell(1, API_URL_COL).value = 'API URL'
        ws.cell(1, CASE_NUMBER_COL).value = 'Case number'
        ws.cell(1, CASE_NAME_COL).value = 'Case name'
        ws.cell(1, JUDGEMENT_RESULT_COL).value = 'Verdict'
        ws.cell(1, EXPECTED_RESULT_COL).value = 'Expected result'
        ws.cell(1, COMPARE_RESULT_COL).value = 'Comparison result'
        ws.cell(1, RESPONSE_COL).value = 'Response'
        ws.cell(1, CASE_DATA_COL).value = 'Test data'

    @staticmethod
    def _set_row_one_width_and_height(ws):
        """Set the header row column widths and row height.

        :param ws: worksheet object
        :return: None
        """
        ws.column_dimensions[get_column_letter(API_NAME_COL)].width = 30.0
        ws.column_dimensions[get_column_letter(API_URL_COL)].width = 30.0
        ws.column_dimensions[get_column_letter(CASE_NUMBER_COL)].width = 12.0
        ws.column_dimensions[get_column_letter(CASE_NAME_COL)].width = 30.0
        ws.column_dimensions[get_column_letter(JUDGEMENT_RESULT_COL)].width = 12.0
        ws.column_dimensions[get_column_letter(EXPECTED_RESULT_COL)].width = 40.0
        ws.column_dimensions[get_column_letter(COMPARE_RESULT_COL)].width = 40.0
        ws.column_dimensions[get_column_letter(RESPONSE_COL)].width = 60.0
        ws.column_dimensions[get_column_letter(CASE_DATA_COL)].width = 40.0
        ws.row_dimensions[1].height = 25.0

    @staticmethod
    def _set_row_one_styles(ws):
        """Set the header row styles.

        :param ws: worksheet object
        :return: None
        """
        # Define the font, fill color and border
        font = Font(name=u'宋体', size=14, bold=True)
        fill = PatternFill(fill_type='solid', fgColor="32CD32")
        border = Border(left=Side(border_style='double', color='FF000000'),
                        right=Side(border_style='double', color='FF000000'),
                        top=Side(border_style='double', color='FF000000'),
                        bottom=Side(border_style='double', color='FF000000'))
        for col in range(CASE_DATA_COL):
            ws.cell(1, col + 1).font = font
            ws.cell(1, col + 1).fill = fill
            ws.cell(1, col + 1).border = border
# -*- coding: utf-8 -*-
from selenium import webdriver

from common.logger import Logger
from common.operation_yaml import Yaml
from config.config_path import chrome_path, read_yaml_path, ie_path, firefox_path

yaml = Yaml()
logger = Logger(logger='Browser').getlog()


class Browser():
    def open_browser(self):
        '''
        Configure the browser.
        :return: the browser instance
        '''
        cfg = yaml.read_yaml(read_yaml_path)
        # normal browser mode
        if cfg['browser']['type'] == 'chrome':
            options = webdriver.ChromeOptions()
            # options.add_argument('headless')  # no visible UI; needed on Linux without a display
            options.add_argument('start-maximized')         # maximize the browser
            options.add_argument('disable-infobars')        # suppress the yellow warning bar
            options.add_argument('disable-extensions')      # disable browser extensions
            options.add_argument('disable-popup-blocking')  # allow popups
            options.add_argument('disable-gpu')             # work around a chrome bug
            browser = webdriver.Chrome(options=options, executable_path=chrome_path)
            logger.info('Started the Chrome browser')
        elif cfg['browser']['type'] == 'ie':
            browser = webdriver.Ie(executable_path=ie_path)
        elif cfg['browser']['type'] == 'firefox':
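# A minimal usage sketch, assuming the yaml config selects 'chrome' and that
# open_browser() returns the created driver (the method above is truncated):
browser = Browser().open_browser()
browser.get('https://example.com')
browser.quit()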
class AnalyzeResults:
    """Analyze and process the test results."""

    def __init__(self):
        # Logger instance
        self._log = Logger('API test result analysis').get_logger()
        # Excel path
        self._path = constant.api_result_excel_path
        # Load the workbook (a WorkBook object)
        self._wb = openpyxl.load_workbook(self._path)
        self._log.info('Loaded result file: {}'.format(os.path.basename(self._path)))
        # Create the analysis sheet
        self._wb.create_sheet('analysis', index=0)
        self._log.info('Created analysis sheet: analysis')
        # Counters
        self._case_num = 0      # total cases
        self._case_failure = 0  # failed cases
        self._api_num = 0       # total interfaces
        self._api_failure = 0   # failed interfaces

    def exec_analysis(self):
        """Run the analysis.

        :return: None
        """
        self._log.info('Analyzing test results:')
        self._get_api_num()
        self._get_api_failure_num()
        self._get_case_num()
        self._get_case_failure_num()
        self._set_known_cells_value()
        result_api = self._statistical_failure_interfaces()
        first_max_row = len(result_api) + 4
        self._fill_failure_interfaces(result_api)
        self._set_unknown_cells_value(first_max_row)
        second_max_row = first_max_row + 3
        result_case = self._statistical_failure_cases()
        self._fill_failure_cases(result_case, second_max_row)
        self._draw_pie_charts()
        self._draw_bar_chart(first_max_row)
        self._log.info('Analysis finished.')
        self._save_excel()

    def _save_excel(self):
        """Save the analysis results.

        :return: None
        """
        self._wb.save(self._path)
        self._log.info('Saved; open the analysis sheet of the Excel file to view!')

    def _statistical_failure_interfaces(self):
        """Collect statistics on failed interfaces.

        :return: None
        """
        ws = self._wb['results']
        self._log.info('Extracting failed-interface data...')
        # Find the rows of the first and last case of every interface
        first_rows = []
        last_rows = []
        for i in range(2, ws.max_row + 1):
            if ws.cell(i, CASE_NUMBER_COL).value == 1:
                first_rows.append(i)
                if i != 2:
                    last_rows.append(i - 1)
        last_rows.append(ws.max_row)
        # Compute per-interface failure data
        result_all = []
        for i, j in zip(first_rows, last_rows):
            result_single = {}
            failure_num = 0
            for row in range(i, j + 1):
                if ws.cell(row, JUDGEMENT_RESULT_COL).value == 'FAILURE':
                    failure_num += 1
            if failure_num:
                result_single['api_name'] = ws.cell(i, API_NAME_COL).value
                result_single['case_num'] = j - i + 1
                result_single['failure_num'] = failure_num
                result_single['success_num'] = j - i + 1 - failure_num
                result_all.append(result_single)
        self._log.info('Extracted: {}'.format(result_all))
        return result_all

    def _fill_failure_interfaces(self, data):
        """Write the failed-interface data.

        :param data: input data
        :return: None
        """
        ws = self._wb['analysis']
        self._log.info('Writing extracted results...')
        rows = [row for row in range(4, len(data) + 4)]
        for d, row in zip(data, rows):
            ws.cell(row, 1).value = d['api_name']
            ws.cell(row, 2).value = d['success_num']
            ws.cell(row, 3).value = d['failure_num']
            ws.cell(row, 4).value = d['case_num']
        self._log.info('Done.')

    def _statistical_failure_cases(self):
        """Collect statistics on failed cases.

        :return: None
        """
        ws = self._wb['results']
        self._log.info('Extracting failed-case data...')
        case_failures = []
        for i in range(2, ws.max_row + 1):
            case_failure = []
            if ws.cell(i, JUDGEMENT_RESULT_COL).value == 'FAILURE':
                case_failure.append(ws.cell(i, API_NAME_COL).value)
                case_failure.append(ws.cell(i, CASE_NAME_COL).value)
                case_failure.append(ws.cell(i, CASE_NUMBER_COL).value)
                # Compute the row range to use as a hyperlink target
                row_region = '{}{}:{}{}'.format(
                    get_column_letter(API_NAME_COL), i,
                    get_column_letter(CASE_DATA_COL), i)
                case_failure.append(row_region)
            if case_failure:
                case_failures.append(case_failure)
        self._log.info('Extracted: {}'.format(case_failures))
        return case_failures

    def _fill_failure_cases(self, data, fill_row):
        """Write the failed-case data.

        :param data: case test data
        :param fill_row: row to start writing at
        :return: None
        """
        ws = self._wb['analysis']
        self._log.info('Writing extracted results...')
        rows = [row for row in range(fill_row, fill_row + len(data))]
        for d, row in zip(data, rows):
            ws.cell(row, 1).value = d[0]
            ws.cell(row, 2).value = d[1]
            ws.cell(row, 3).value = d[2]
            ws.cell(row, 4).value = 'Click to see the failure reason'
            ws.cell(row, 4).hyperlink = ('{}#results!{}'.format(
                constant.API_TEST_RESULT_EXCEL, d[3]))
        self._log.info('Done.')

    def _draw_pie_charts(self):
        """Draw the two pie charts.

        :return: None
        """
        ws = self._wb['analysis']
        # Set the cell values referenced by the pie charts
        ws['G3'] = 'Failed'
        ws['G4'] = 'Passed'
        ws['H3'] = self._api_failure
        ws['H4'] = self._api_num - self._api_failure
        ws['N3'] = 'Failed'
        ws['N4'] = 'Passed'
        ws['O3'] = self._case_failure
        ws['O4'] = self._case_num - self._case_failure
        # Interface pie chart
        pie = PieChart()
        labels = Reference(ws, min_col=7, min_row=3, max_row=4)
        data = Reference(ws, min_col=8, min_row=2, max_row=4)
        pie.add_data(data, titles_from_data=True)
        pie.set_categories(labels)
        pie.title = "Interface results"
        slice_ = DataPoint(idx=0, explosion=10)
        pie.series[0].data_points = [slice_]
        ws.add_chart(pie, "F1")
        pie.height = 9.5
        pie.width = 13
        self._log.info('Generated the interface pie chart.')
        # Case pie chart
        pie2 = PieChart()
        labels2 = Reference(ws, min_col=14, min_row=3, max_row=4)
        data2 = Reference(ws, min_col=15, min_row=2, max_row=4)
        pie2.add_data(data2, titles_from_data=True)
        pie2.set_categories(labels2)
        pie2.title = "Case results"
        slice2_ = DataPoint(idx=0, explosion=10)
        pie2.series[0].data_points = [slice2_]
        ws.add_chart(pie2, "M1")
        pie2.height = 9.5
        pie2.width = 13
        self._log.info('Generated the case pie chart.')

    def _draw_bar_chart(self, row_):
        """Draw the vertical bar chart.

        :param row_: starting row
        :return: None
        """
        ws = self._wb['analysis']
        bar = BarChart()
        bar.type = 'bar'
        bar.style = 11
        bar.title = 'Failed interface overview'
        bar.y_axis.title = 'Passed or failed case count'
        if row_ != 4:
            data = Reference(ws, min_col=2, min_row=3, max_row=row_ - 1, max_col=3)
        else:
            data = Reference(ws, min_col=2, min_row=3, max_row=row_, max_col=3)
        if row_ != 4:
            cats = Reference(ws, min_col=1, min_row=4, max_row=row_ - 1)
        else:
            cats = Reference(ws, min_col=1, min_row=4, max_row=row_)
        bar.add_data(data, titles_from_data=True)
        bar.set_categories(cats)
        bar.shape = 4
        ws.add_chart(bar, "F12")
        bar.width = 30
        bar.height = 0.5 * (row_ + 20)  # adapt the bar chart height to the row count
        self._log.info('Generated the failed-interface bar chart.')

    def _get_case_num(self):
        """Count the cases.

        :return: None
        """
        ws = self._wb['results']
        case_num = ws.max_row - 1
        self._case_num = case_num
        self._log.info('Case count for this run: {}'.format(case_num))

    def _get_case_failure_num(self):
        """Count the failed cases.

        :return: None
        """
        ws = self._wb['results']
        case_failure = 0
        for i in range(2, ws.max_row + 1):
            if ws.cell(i, JUDGEMENT_RESULT_COL).value == 'FAILURE':
                case_failure += 1
        self._case_failure = case_failure
        self._log.info('Failed case count for this run: {}'.format(case_failure))

    def _get_api_num(self):
        """Count the interfaces.

        :return: None
        """
        ws = self._wb['results']
        api_num = 0
        for i in range(2, ws.max_row + 1):
            if ws.cell(i, CASE_NUMBER_COL).value == 1:
                api_num += 1
        self._api_num = api_num
        self._log.info('Interface count for this run: {}'.format(api_num))

    def _get_api_failure_num(self):
        """Count the failed interfaces.

        :return: None
        """
        ws = self._wb['results']
        api_failure = []
        for i in range(2, ws.max_row + 1):
            if ws.cell(i, JUDGEMENT_RESULT_COL).value == 'FAILURE':
                api_failure.append(ws.cell(i, API_NAME_COL).value)
        api_failure = list(set(api_failure))
        self._api_failure = len(api_failure)
        self._log.info('Failed interface count for this run: {}'.format(len(api_failure)))

    def _set_known_cells_value(self):
        """Set the cells whose positions are known, and style them.

        :return: None
        """
        ws = self._wb['analysis']
        value_a1 = 'Analysis:\n' \
                   '1. Interfaces tested: {0:^7}, test cases: {1:^7}.\n' \
                   '2. Failed interfaces: {2:^7}, passed interfaces: {3:^7}.\n' \
                   '3. Failed cases: {4:^7}, passed cases: {5:^7}.\n' \
                   '4. Interface pass rate {6:^9.2%}, case pass rate {7:^9.2%}.'.format(
                       self._api_num, self._case_num, self._api_failure,
                       self._api_num - self._api_failure, self._case_failure,
                       self._case_num - self._case_failure,
                       (self._api_num - self._api_failure) / self._api_num,
                       (self._case_num - self._case_failure) / self._case_num)
        ws.cell(1, 1).value = value_a1
        ws.cell(2, 1).value = 'Failed interface overview'
        ws.cell(3, 1).value = 'Interface name'
        ws.cell(3, 2).value = 'Passed'
        ws.cell(3, 3).value = 'Failed'
        ws.cell(3, 4).value = 'Total'
        # Merge cells; set fonts, row heights and column widths
        ws.column_dimensions['A'].width = 36
        ws.column_dimensions['B'].width = 19
        ws.column_dimensions['C'].width = 19
        ws.column_dimensions['D'].width = 19
        ws.row_dimensions[1].height = 135
        ws.row_dimensions[2].height = 25
        ws.row_dimensions[3].height = 20
        ws.merge_cells('A1:D1')
        ws.merge_cells('A2:D2')
        font1 = Font(size=16, bold=True)
        font2 = Font(size=14, bold=True)
        font3 = Font(size=12, bold=True)
        ws.cell(1, 1).font = font1
        ws.cell(2, 1).font = font2
        for i in range(4):
            ws.cell(3, i + 1).font = font3
        fill1 = PatternFill(fill_type='solid', fgColor="00BFFF")
        fill2 = PatternFill(fill_type='solid', fgColor="87CEFA")
        ws.cell(1, 1).fill = fill1
        for i in range(4):
            ws.cell(3, i + 1).fill = fill2
        alignment1 = Alignment(vertical='center')
        alignment2 = Alignment(vertical='center', horizontal='center')
        ws.cell(1, 1).alignment = alignment1
        ws.cell(2, 1).alignment = alignment2

    def _set_unknown_cells_value(self, row_):
        """Set the cells whose positions depend on the data.

        :param row_: starting row
        :return: None
        """
        ws = self._wb['analysis']
        ws.cell(row_, 1).value = 'Total'
        if row_ != 4:
            ws.cell(row_, 2).value = '=SUM(B4:{})'.format('B' + str(row_ - 1))
            ws.cell(row_, 3).value = '=SUM(C4:{})'.format('C' + str(row_ - 1))
            ws.cell(row_, 4).value = '=SUM(D4:{})'.format('D' + str(row_ - 1))
        else:
            ws.cell(row_, 2).value = 0
            ws.cell(row_, 3).value = 0
            ws.cell(row_, 4).value = 0
        for i in range(4):
            ws.cell(row_, i + 1).font = Font(bold=True)
        ws.cell(row_ + 1, 1).value = 'Failed case overview'
        ws.cell(row_ + 2, 1).value = 'Interface name'
        ws.cell(row_ + 2, 2).value = 'Case name'
        ws.cell(row_ + 2, 3).value = 'Case number'
        ws.cell(row_ + 2, 4).value = 'Hyperlink'
        ws.merge_cells('{}:{}'.format(('A' + str(row_ + 1)), ('D' + str(row_ + 1))))
        ws.row_dimensions[row_ + 1].height = 25
        ws.row_dimensions[row_ + 2].height = 20
        font1 = Font(size=14, bold=True)
        font2 = Font(size=12, bold=True)
        ws.cell(row_ + 1, 1).font = font1
        for i in range(4):
            ws.cell(row_ + 2, i + 1).font = font2
        fill = PatternFill(fill_type='solid', fgColor="87CEFA")
        for i in range(4):
            ws.cell(row_ + 2, i + 1).fill = fill
        alignment = Alignment(vertical='center', horizontal='center')
        ws.cell(row_ + 1, 1).alignment = alignment
import logging
import time

from base.selenium_driver import SeleniumDriver
from common.logger import Logger
from pages.yw_list_tb import TbYwListPage
from pages.yw_list_wshy import WshyYwListPage
from pages.yw_list_ycjy import YcjyYwListPage
from pages.yw_list_ys import YsYwListPage

logger = Logger(logger='search-aj').getlog()
logger.setLevel(level=logging.INFO)


class SearchAJ(SeleniumDriver):
    """Search for information in the business list."""

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    # Search for a case in the arrest-proposal list
    def search_tbxyrxm(self, xyrxm):
        tb = TbYwListPage()  # instantiate the page, get elements, operate on them
        self.text_input(xyrxm, tb.xyrxminputtext(), 'xpath')
        # Click the add-case button
        self.click(tb.searchbutton(), 'id')
        time.sleep(5)

    # Double-click the case name in the arrest-proposal list to open the case details
    def enter_tbajxx(self):
from allure import title
from common.logger import Logger
import traceback, logging, allure
from common.queries import *
from conf import Api_Url as c
from httpRequests._requests import *

"""
Test case template.
Scripts that do not read a case file can simply delete the caseName/casedata
lines. The first argument to Logger('') controls the generated log file name;
FileLevel controls the level written to the log file.
"""
# caseName, casedata = GettingDate('.yml').return_data()
# Pass the yml config file name to GettingDate to read its data; it returns two
# lists (case titles, case data).

# FileLevel sets the level written to the log file; the first argument is the
# generated log file name.
logger = Logger('Product info', FileLevel=logging.INFO).getlog()


@allure.feature('Product info')
@allure.story('Get product info by store and category id')
@allure.severity('blocker')
class Test_getProductCategoryByStoreIdAndCatId:
    """
    The test phone number is a masked personal number; to switch accounts
    later, only change phone= and password=.
    result asserts on the returned success flag: 1 is True, 0 is False.
    For now each case prepares its own url and parameters.
    A noToken request carries no x-stream-id (non-Token).
    """

    def setup(self):
from common.logger import Logger

logger = Logger.create_logger('rce')
# -*- coding: UTF-8 -*-
import pymongo

from common.logger import Logger

logger = Logger("storage.py")


class Mongo():
    def __init__(self, db_name, collection_name):
        self.db_nam = db_name
        self.col_nam = collection_name
        self.collection = self.conn()

    # Open the connection
    def conn(self):
        # Connect locally
        # client = pymongo.MongoClient(host="127.0.0.1", port=27017)
        # client = pymongo.MongoClient(host="127.0.0.1", port=11227)
        # Connect to the cloud server with username and password
        # client = pymongo.MongoClient('mongodb://*****:*****@192.168.192.27:27017/')
        client = pymongo.MongoClient(host="127.0.0.1", port=27017)
        # Check for errors
        if client is None:
            logger.error("mongodb connection failed...")
            # return None
        db = client[self.db_nam]
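# A minimal usage sketch (hypothetical database and collection names; conn()
# above is truncated and presumably returns db[self.col_nam]):
store = Mongo('testdb', 'pages')
store.collection.insert_one({'url': 'https://example.com'})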
high_veg_share, E.gvi_comb_gsv_veg.value: combine_gvi_indexes(gsv_gvi, low_veg_share, high_veg_share), E.gvi_comb_gsv_high_veg.value: combine_gvi_indexes(gsv_gvi, low_veg_share, high_veg_share, omit_low_veg=True) }) return graph if __name__ == '__main__': log = Logger(printing=True, log_file=r'green_view_join_v1.log', level='debug') subset = False log.info(f'Starting GVI join with graph subset: {subset}') graph_file_in = r'graph_in/kumpula.graphml' if subset else r'graph_in/hma.graphml' graph_file_out = r'graph_out/kumpula.graphml' if subset else r'graph_out/hma.graphml' edge_table_db_name = 'edge_buffers_subset' if subset else 'edge_buffers' execute_sql = db.get_sql_executor(log) db_tables = db.get_db_table_names(execute_sql) # load GSV GVI points from GPKG gsv_gvi_gdf = load_gsv_gvi_gdf(r'data/greenery_points.gpkg')
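# combine_gvi_indexes() is not shown in this excerpt. A plausible minimal sketch,
# assuming it blends the point-based GSV GVI with land-cover vegetation shares;
# the 50/50 weighting and the fallback rule are illustrative assumptions, not
# the project's actual formula.
def combine_gvi_indexes_sketch(gsv_gvi, low_veg_share, high_veg_share, omit_low_veg=False):
    """Blend GSV GVI with land-cover vegetation shares into a [0, 1] index."""
    veg_share = high_veg_share if omit_low_veg else low_veg_share + high_veg_share
    if gsv_gvi is None:
        return min(veg_share, 1.0)  # fall back to land cover alone
    return min(0.5 * gsv_gvi + 0.5 * veg_share, 1.0)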
class Base_Case(unittest.TestCase):

    @classmethod
    def setUpClass(cls) -> None:
        urllib3.disable_warnings()
        # Suppress ResourceWarning noise
        warnings.simplefilter("ignore", ResourceWarning)
        driver_manager = Driver_Manager()
        cls.driver = driver_manager.get_driver(login_loc['browserType'])
        cls.driver.implicitly_wait(10)

    @classmethod
    def tearDownClass(cls) -> None:
        cls.driver.quit()

    def setUp(self) -> None:
        self.log = Logger()
        self.imgs = []
        self.start = time.perf_counter()
        self.log.info("============ [Test case {} started] ====================".format(
            self._testMethodName))
        self.driver.implicitly_wait(10)
        self.base = Base_Page(self.driver, self.log)
        self.log.info("[Operating system]: " + str(platform.platform()))
        self.log.info("[Browser]: " + login_loc['browserType'])
        self.driver.get(login_loc['URL'])
        self.log.info("[Opened URL]: " + login_loc['URL'])
        self.base.wait(1)

    def tearDown(self) -> None:
        self.end = time.perf_counter()
        self.log.info('[Case duration]: %.2f s' % (self.end - self.start))
        self.log.info(
            "==================== [Test case {} finished] ====================".format(
                self._testMethodName))

    def default_login(self):
        self.base.input(By.XPATH, login_loc['登录页面']['用户名'],
                        login_loc['数据']['用户名'])
        self.base.input(By.XPATH, login_loc['登录页面']['密码'],
                        login_loc['数据']['密码'])
        self.base.click(By.XPATH, login_loc['登录页面']['登录按键'])
        self.base.wait(2)
        self.log.info("[Default login succeeded]")
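# A hedged sketch (not from the original suite) of a concrete case built on
# Base_Case; the test body and the commented assertion target are assumptions.
class Test_Login(Base_Case):

    def test_default_login(self):
        # Reuses the driver, logger and page helpers prepared in setUp()
        self.default_login()
        self.log.info("[Assertion] expecting to land on the home page")
        # A real case would assert on a post-login element here, e.g.
        # self.assertTrue(self.base.is_visible(By.XPATH, home_loc['首页']['标识']))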
def MatchFiles(checkerFile, c1File, targetArch, debuggableMode):
    for testCase in checkerFile.testCases:
        if testCase.testArch not in [None, targetArch]:
            continue
        if testCase.forDebuggable != debuggableMode:
            continue

        # TODO: Currently does not handle multiple occurrences of the same group
        # name, e.g. when a pass is run multiple times. It will always try to
        # match a check group against the first output group of the same name.
        c1Pass = c1File.findPass(testCase.name)
        if c1Pass is None:
            with open(c1File.fileName) as cfgFile:
                Logger.log(''.join(cfgFile), Logger.Level.Error)
            Logger.fail("Test case not found in the CFG file",
                        testCase.fileName, testCase.startLineNo, testCase.name)

        Logger.startTest(testCase.name)
        try:
            MatchTestCase(testCase, c1Pass, c1File.instructionSetFeatures)
            Logger.testPassed()
        except MatchFailedException as e:
            lineNo = c1Pass.startLineNo + e.lineNo
            if e.statement.variant == TestStatement.Variant.Not:
                msg = "NOT statement matched line {}"
            else:
                msg = "Statement could not be matched starting from line {}"
            msg = msg.format(lineNo)
            with open(c1File.fileName) as cfgFile:
                Logger.log(''.join(cfgFile), Logger.Level.Error)
            Logger.testFailed(msg, e.statement, e.variables)
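# The matcher above expects MatchFailedException to carry the failing statement,
# the line offset within the pass output, and the variable bindings at failure
# time. A minimal sketch of such a carrier (an assumption; the real class is
# defined elsewhere in the checker sources):
class MatchFailedException(Exception):
    def __init__(self, statement, lineNo, variables):
        super(MatchFailedException, self).__init__()
        self.statement = statement  # the TestStatement that failed to match
        self.lineNo = lineNo        # offset into the pass output
        self.variables = variables  # variable bindings at the failure point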
def __init__(self, ARCH, DATA, datadir, logdir, path=None, model_mode='salsanext'):
    # parameters
    self.ARCH = ARCH
    self.DATA = DATA
    self.datadir = datadir
    self.log = logdir
    self.path = path
    self.model_mode = model_mode

    self.batch_time_t = AverageMeter()
    self.data_time_t = AverageMeter()
    self.batch_time_e = AverageMeter()
    self.epoch = 0

    # put logger where it belongs
    self.info = {"train_update": 0,
                 "train_loss": 0,
                 "train_acc": 0,
                 "train_iou": 0,
                 "valid_loss": 0,
                 "valid_acc": 0,
                 "valid_iou": 0,
                 "best_train_iou": 0,
                 "best_val_iou": 0}

    # get the data
    parserModule = imp.load_source("parserModule",
                                   booger.TRAIN_PATH + '/tasks/semantic/dataset/' +
                                   self.DATA["name"] + '/parser.py')
    self.parser = parserModule.Parser(root=self.datadir,
                                      train_sequences=self.DATA["split"]["train"],
                                      valid_sequences=self.DATA["split"]["valid"],
                                      test_sequences=None,
                                      labels=self.DATA["labels"],
                                      color_map=self.DATA["color_map"],
                                      learning_map=self.DATA["learning_map"],
                                      learning_map_inv=self.DATA["learning_map_inv"],
                                      sensor=self.ARCH["dataset"]["sensor"],
                                      max_points=self.ARCH["dataset"]["max_points"],
                                      batch_size=self.ARCH["train"]["batch_size"],
                                      workers=self.ARCH["train"]["workers"],
                                      gt=True,
                                      shuffle_train=True)

    # weights for loss (and bias)
    epsilon_w = self.ARCH["train"]["epsilon_w"]
    content = torch.zeros(self.parser.get_n_classes(), dtype=torch.float)
    for cl, freq in DATA["content"].items():
        x_cl = self.parser.to_xentropy(cl)  # map actual class to xentropy class
        content[x_cl] += freq
    self.loss_w = 1 / (content + epsilon_w)  # get weights
    for x_cl, w in enumerate(self.loss_w):  # ignore the ones necessary to ignore
        if DATA["learning_ignore"][x_cl]:
            # don't weigh
            self.loss_w[x_cl] = 0
    print("Loss weights from content: ", self.loss_w.data)

    # concatenate the encoder and the head
    with torch.no_grad():
        self.model = SalsaNet(self.ARCH,
                              self.parser.get_n_classes(),
                              self.path)

    self.tb_logger = Logger(self.log + "/tb", self.model)

    # GPU?
self.gpu = False self.multi_gpu = False self.n_gpus = 0 self.model_single = self.model pytorch_total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) for name, param in self.model.named_parameters(): if param.requires_grad: print("{}: {:,}".format(name, param.numel())) print( "Total of Trainable Parameters: {:,}".format(pytorch_total_params)) self.device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") print("Training in device: ", self.device) if torch.cuda.is_available() and torch.cuda.device_count() > 0: cudnn.benchmark = True cudnn.fastest = True self.gpu = True self.n_gpus = 1 self.model.cuda() if torch.cuda.is_available() and torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") self.model = nn.DataParallel(self.model) # spread in gpus self.model = convert_model(self.model).cuda() # sync batchnorm self.model_single = self.model.module # single model to get weight names self.multi_gpu = True self.n_gpus = torch.cuda.device_count() self.criterion = nn.NLLLoss(weight=self.loss_w).to(self.device) self.ls = Lovasz_softmax(ignore=0).to(self.device) # loss as dataparallel too (more images in batch) if self.n_gpus > 1: self.criterion = nn.DataParallel( self.criterion).cuda() # spread in gpus self.ls = nn.DataParallel(self.ls).cuda() self.optimizer = optim.SGD([{ 'params': self.model.parameters() }], lr=self.ARCH["train"]["lr"], momentum=self.ARCH["train"]["momentum"], weight_decay=self.ARCH["train"]["w_decay"]) # Use warmup learning rate # post decay and step sizes come in epochs and we want it in steps steps_per_epoch = self.parser.get_train_size() up_steps = int(self.ARCH["train"]["wup_epochs"] * steps_per_epoch) final_decay = self.ARCH["train"]["lr_decay"]**(1 / steps_per_epoch) self.scheduler = warmupLR(optimizer=self.optimizer, lr=self.ARCH["train"]["lr"], warmup_steps=up_steps, momentum=self.ARCH["train"]["momentum"], decay=final_decay) if self.path is not None: torch.nn.Module.dump_patches = True w_dict = torch.load(path + "/SalsaNet", map_location=lambda storage, loc: storage) self.model.load_state_dict(w_dict['state_dict'], strict=True) self.optimizer.load_state_dict(w_dict['optimizer']) self.epoch = w_dict['epoch'] + 1 self.scheduler.load_state_dict(w_dict['scheduler']) print("dict epoch:", w_dict['epoch']) self.info = w_dict['info'] print("info", w_dict['info'])
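# warmupLR is project-specific and not shown here. A minimal sketch of the
# schedule it appears to implement (linear warm-up over `warmup_steps`, then
# per-step exponential decay), expressed with PyTorch's LambdaLR; the exact
# decay form is an assumption.
from torch.optim.lr_scheduler import LambdaLR

def make_warmup_decay_scheduler(optimizer, warmup_steps, decay):
    def lr_factor(step):
        if step < warmup_steps:
            return (step + 1) / float(warmup_steps)  # linear ramp up to the base lr
        return decay ** (step - warmup_steps)        # exponential decay per step
    return LambdaLR(optimizer, lr_lambda=lr_factor)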
import common.cryptographer from common.logger import Logger from common.appointment import Appointment from common.cryptographer import Cryptographer from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX from test.teos.unit.conftest import ( get_random_value_hex, generate_dummy_appointment_data, generate_keypair, bitcoind_connect_params, get_config, ) common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX) APPOINTMENT_OK = (0, None) NO_HEX_STRINGS = [ "R" * LOCATOR_LEN_HEX, get_random_value_hex(LOCATOR_LEN_BYTES - 1) + "PP", "$" * LOCATOR_LEN_HEX, " " * LOCATOR_LEN_HEX, ] WRONG_TYPES = [ [], "", get_random_value_hex(LOCATOR_LEN_BYTES), 3.2, 2.0, (),
from requests.exceptions import MissingSchema, InvalidSchema, InvalidURL

from cli.help import show_usage, help_add_appointment, help_get_appointment
from cli import DEFAULT_CONF, DATA_DIR, CONF_FILE_NAME, LOG_PREFIX

import common.cryptographer
from common.blob import Blob
from common import constants
from common.logger import Logger
from common.appointment import Appointment
from common.config_loader import ConfigLoader
from common.cryptographer import Cryptographer
from common.tools import setup_logging, setup_data_folder
from common.tools import check_sha256_hex_format, check_locator_format, compute_locator

logger = Logger(actor="Client", log_name_prefix=LOG_PREFIX)
common.cryptographer.logger = Logger(actor="Cryptographer", log_name_prefix=LOG_PREFIX)


def load_keys(teos_pk_path, cli_sk_path, cli_pk_path):
    """
    Loads all the keys required to sign, send, and verify the appointment.

    Args:
        teos_pk_path (:obj:`str`): path to the TEOS public key file.
        cli_sk_path (:obj:`str`): path to the client private key file.
        cli_pk_path (:obj:`str`): path to the client public key file.

    Returns:
        :obj:`tuple` or ``None``: a three item tuple containing a teos_pk object, cli_sk object and the cli_sk_der
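# A hedged sketch of what the body of load_keys might look like, assuming the
# keys are stored DER-encoded and parsed with the `cryptography` package (>=3.1,
# where the backend argument is optional); the real implementation may differ.
from cryptography.hazmat.primitives.serialization import load_der_public_key, load_der_private_key

def load_keys_sketch(teos_pk_path, cli_sk_path, cli_pk_path):
    try:
        with open(teos_pk_path, "rb") as f:
            teos_pk = load_der_public_key(f.read())
        with open(cli_sk_path, "rb") as f:
            cli_sk_der = f.read()
        cli_sk = load_der_private_key(cli_sk_der, password=None)
        with open(cli_pk_path, "rb") as f:
            load_der_public_key(f.read())  # validate the client public key as well
        return teos_pk, cli_sk, cli_sk_der
    except (FileNotFoundError, ValueError) as e:
        logger.error("could not load keys: {}".format(e))
        return None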