def mill(traj, z_start=0, z_stop=None, z_step=1, FF=3000, FZ=200, options=None):
    """Cut a trajectory: a single pass when z_stop is None, otherwise
    layer by layer from z_start down to z_stop in z_step increments.

    :param traj: trajectory to mill
    :param z_start: depth of the first layer
    :param z_stop: final depth; None means a single pass at the option depth
    :param z_step: depth decrement per layer
    :param FF: feed rate, FZ: plunge rate
    :param options: optional dict of positioning options (x, y, scale, angle, z)
    """
    from strategy import Strategy
    from tool import Tool
    cut = Strategy()
    t = Tool()
    t.F = FF
    t.FZ = FZ
    # Pull positioning options with defaults.
    xx = get_option(options, 'x', 0)
    yy = get_option(options, 'y', 0)
    sc = get_option(options, 'scale', 1)
    ang = get_option(options, 'angle', 0)
    if z_stop is None:
        # Just one layer wherever the options say.
        cut.mill(traj, tool=t, x=xx, y=yy, scale=sc, angle=ang, options=options)
        return
    # BUG FIX: options defaults to None, and None.copy() raised
    # AttributeError whenever z_stop was given without options.
    opt = {} if options is None else options.copy()
    z = z_start
    while z >= z_stop:
        opt['z'] = z
        cut.mill(traj, tool=t, x=xx, y=yy, scale=sc, angle=ang, options=opt)
        z -= z_step
    if z + z_step > z_stop:
        # Finish the leftover sliver between the last full layer and z_stop.
        opt['z'] = z_stop
        cut.mill(traj, tool=t, x=xx, y=yy, scale=sc, angle=ang, options=opt)
def extractAttribute(self, key, line, error_attributes, warning_attributes):
    """Look up the attribute spec(s) registered under *key* and return the
    value matched in *line*, cleaned of non-ASCII characters (HTML mode).
    Returns "" when nothing matches."""
    attrs = self.dico_attributes[key]
    if Tool._is_array(attrs):
        # Several candidate attributes: the first match wins.
        for candidate in attrs:
            attr_value_found = self.matchAttribute(line, candidate, error_attributes, warning_attributes)
            if attr_value_found:
                break
    else:
        attr_value_found = self.matchAttribute(line, attrs, error_attributes, warning_attributes)
    if not attr_value_found:
        return ""
    return Tool.replaceNonASCII(attr_value_found, html=True)
class Login:
    """UDP login client: assembles a TXProtocol login packet and exchanges it
    with the server over a datagram socket."""

    def __init__(self, user=1, pwd=b'123', host="61.151.180.166", port=8000):
        # NOTE(review): the `host` argument is immediately overwritten with a
        # hard-coded address, so the parameter/default is never used — confirm.
        host = "61.151.180.158"
        self.tool = Tool()
        # md2: digest of user + password, used as the User identity.
        self.md2 = self.tool.md5(user, pwd)
        self.user = User(self.md2)
        self.host = self.tool.get_host_by_name(host)
        self.port = port
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sendPacket = b''  # last packet assembled by start_fill()
        self.address = (host, port)
        print(self.address)

    def get_md2(self):
        return self.md2

    def set_value(self, key, value=b''):
        # Only assign attributes that already exist; ignore unknown keys.
        if hasattr(self, key):
            #v = getattr(self,key,None)
            setattr(self, key, value)
        else:
            print('no', key)

    def start_fill(self, key=None):
        """Assemble the login packet (head + encrypted body + tail), store it
        in self.sendPacket and return it. *key*, when given, must be bytes."""
        pro = TXProtocol()
        sp = b''  # sendPacket
        if key != None:
            if type(key) != bytes:
                raise RuntimeError('key must bytes')
            pro.set_value("SecretKey", key)
        #pro.set("", )
        sp += pro.fill_login_head()
        sp += pro.fill_login_encrypt()
        sp += pro.fill_login_end()
        self.sendPacket = sp
        return sp

    def send_0825(self):
        """Build the 0825 login packet and wait for a server datagram."""
        self.start_fill()
        print("发送包", self.sendPacket)
        try:
            pack = Tool.hexs_to_int(self.sendPacket)
            # print(pack)
            #self.udp_socket.sendto(pack,self.address)
            #one_thr = threading.Thread(target = self.get_0825)
            #one_thr.start()
            # NOTE(review): sendto above is commented out, so nothing is
            # actually transmitted and this recv blocks — confirm intent.
            data = self.udp_socket.recv(2048)
            print("data", data)
        except BaseException:
            print("Error")

    def get_0825(self):
        """Receive loop: print every incoming datagram (runs forever)."""
        print("get_0825")
        while True:
            print("get_0825_2048_START")
            data = self.udp_socket.recv(2048)
            print(data)
            print("get_0825_2048_END")
def __init__(self, config):
    """Keep the config, reset all lazily-built members, and create the Tool."""
    self.config = config
    # Model, vocabularies and data are populated later.
    for member in ('model', 'word_vocab', 'tag_vocab',
                   'train_dev_data', 'bigram_vocab', 'lattice_vocab'):
        setattr(self, member, None)
    self.tool = Tool(self.config)
def response(data, template=None):
    """Wrap a request response: JSON when requested via `_display=json`
    (or when no template is given), rendered HTML otherwise."""
    display = str(request.args.get('_display', 'html'))  # requested output format
    tool = Tool()
    if template is None or display == 'json':
        return tool.responseSuccess(data)
    return render_template(template, **data)
def test_new_getDeeplinkPicConfig_picList_type(self):
    '''Verify the new-color deeplink picture payload structure.'''
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1,
                     msg="event_id = {}".format(self.params['event_id']))
    # Only non-feature events carry a dict-shaped picList.
    if result['data']['eventType'] != "feature":
        self.assertTrue(Tool.check_type(result['data']['picList'], dict))
def test_new_daily_isAssKeyword(self):
    '''Check that every picture-asset keyword is present (new color).'''
    expected_keys = ["picNpic", "picThumbnail", "picColorImg", "picFrameImg"]
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    complete = Tool.check_isKeyword_picAssets(result['data']['picList'], expected_keys)
    self.assertTrue(complete, msg="资源关键字缺失")
def __get_cases_content_html(self, data):
    """Render the report content template with the configured table titles."""
    table = Tool().get_yaml('project.config.repore.table')
    # Turn the {name: text} mapping into a list of title objects.
    new_titles = [{'name': k, 'text': v} for k, v in table.items()]
    tpl_source = Tool().open_file(
        os.path.dirname(__file__) + '/reportTpl/content.html')
    return Template(tpl_source).render(datas=data, titles=new_titles)
def addTool(context, REQUEST=None):
    """ Add OpenMeetings tool from ZMI """
    new_tool = Tool(id=config.ID)
    new_tool.title = config.TITLE
    context._setObject(config.ID, new_tool)
    # Re-fetch the acquisition-wrapped copy that Zope stored.
    tool = context._getOb(config.ID)
    if REQUEST:
        # Web request: refresh the ZMI main view.
        return context.manage_main(context, REQUEST, update_menu=1)
    return tool.getId()
def test_new_daily_isPicKeyword(self):
    '''Check that every picture-data keyword is present (new color).'''
    expected_keys = [
        "picName", "picType", "picUnlockDate", "picExpireDate", "picAssets"
    ]
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    complete = Tool.check_isKeyword(result['data']['picList'], expected_keys)
    self.assertTrue(complete, msg="图片关键字缺失")
def __init__(self, user=1, pwd=b'123', host="61.151.180.166", port=8000):
    # NOTE(review): the `host` argument is immediately overwritten with a
    # hard-coded address, so the parameter/default is never used — confirm.
    host = "61.151.180.158"
    self.tool = Tool()
    # md2: digest of user + password, used as the User identity.
    self.md2 = self.tool.md5(user, pwd)
    self.user = User(self.md2)
    self.host = self.tool.get_host_by_name(host)
    self.port = port
    self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sendPacket = b''  # last assembled packet
    self.address = (host, port)
    print(self.address)
def test_new_getDeeplinkPicConfig_isAssKeyword(self):
    '''Check that every event-asset keyword is present (new color).'''
    expected_keys = ["picNpic", "picThumbnail", "picColorImg", "picFrameImg"]
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1,
                     msg="event_id = {}".format(self.params['event_id']))
    # Only non-feature events carry a deeplinkPicAsset to check.
    if result['data']['eventType'] != "feature":
        complete = Tool.check_isKeyword(
            result['data']['picList'][0]['deeplinkPicAsset'], expected_keys)
        self.assertTrue(complete, msg="资源关键字缺失")
def test_new_getDeeplinkPicConfig_isPicKeyword(self):
    '''Check that every event-data keyword is present (new color).'''
    expected_keys = ["picId", "deeplinkPicAsset"]
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1,
                     msg="event_id = {}".format(self.params['event_id']))
    if result['data']['eventType'] != "feature":
        complete = Tool.check_isKeyword(result['data']['picList'], expected_keys)
        self.assertTrue(complete, msg="活动关键字缺失")
def test_new_gallery_isPicKeyword(self):
    '''Check that every gallery picture-data keyword is present (new color).'''
    expected_keys = [
        "picName", "picType", "picClass", "picUnlockDate", "picVipUnlockDate",
        "picExpireDate", "picUnlockType", "picUnlockNumber", "picJigsawId",
        "picAssets", "picOrder", "picComicId", "picComicKey"
    ]
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    complete = Tool.check_isKeyword(result['data']['picList'], expected_keys)
    self.assertTrue(complete, msg="图片关键字缺失")
def test_new_getNewsConfig_ios_picList_type(self):
    '''Verify the iOS news-config picture payload structure (new color).'''
    # BUG FIX: "&register_date" had been mangled into "®ister_date"
    # (the "&reg" prefix turned into the ® entity), corrupting the query
    # string so register_date was never sent as a parameter.
    self.url = "{}?game_ver={}&os_type={}&register_date={}&game_date={}&game_actDay={}".format(
        self.base_url,
        self.__class__.value_dict['game_ver'],
        "Ios",
        self.__class__.value_dict['register_date'],
        self.game_date,
        self.__class__.value_dict['game_actDay'],
    )
    result = Tool.request_post_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    self.assertTrue(Tool.check_type(result['data']['picList'], dict))
def __init__(self):
    # Initialize SQL-backed action-item storage on top of the Tool base class.
    self.actions_db_filename = ""      # path filled in by _loadSQLConfig()
    self.actions_list_assignees = []
    Tool.__init__(self)
    self._loadSQLConfig()
    # NOTE(review): this branch is deliberately disabled (0==1 is never true).
    # It used to offer creating the SQLite database when the file is missing.
    if 0==1:
        try:
            with open(self.actions_db_filename):
                pass
        except IOError:
            print 'SQLite database does not exists.'
            if tkMessageBox.askokcancel("Create Action Items SQLite database", "Do you want to create new database ?"):
                self.sqlite_create_actions_db()
def test_new_gallery_pictype_check(self):
    '''Returned pictures must match the randomly requested pic_type (new color).'''
    all_types = [
        "Jigsaw", "Animated", "Special", "Character", "Animal", "Flower",
        "Places", "Nature", "Message", "Mosaic", "Mandala", "Other"
    ]
    # Pick one type at random for this run.
    chosen = all_types[random.randint(0, len(all_types) - 1)]
    self.params['pic_type'] = chosen
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    self.assertTrue(Tool.check_pic_type(result['data']['picList'], chosen),
                    msg="{}类型存在异常".format(chosen))
def get_comm_url(self, all_url):
    """Scrape the project-address table at *all_url* and process each
    community row: fetch the detail page, then the listing page."""
    response = requests.get(all_url, headers=self.headers)
    html = response.text
    # Grab the table starting at the "项目地址" (project address) header.
    comm_html = re.search('项目地址<.*?</table>', html, re.S | re.M).group()
    comm_info_list = re.findall('<tr>(.*?)</tr>', comm_html, re.S | re.M)
    for i in comm_info_list:
        # First link in the row: the community detail page (GBK-encoded URL).
        comm_detail = re.search('href="(.*?)"', i, re.S | re.M).group(1)
        tool = Tool()
        replace_str = tool.url_quote(comm_detail, encode='gbk')
        comm_detail_url = 'http://113.106.199.148/web/' + replace_str
        comm = self.get_comm_detail(comm_detail_url)
        # Second link in the row: the community listing page.
        comm_url_encode = re.search('href=.*?href="(.*?)"', i, re.S | re.M).group(1)
        replace_str = tool.url_quote(comm_url_encode, encode='gbk')
        comm_url = 'http://113.106.199.148/web/' + replace_str
        self.get_comm_info(comm_url,comm)
class Pipeline():
    """Finds, for each baseball pitch video, the frame where the ball is
    released, and logs the result."""

    def __init__(self, out_img_dir=None):
        # tool for processing one frame and save the frame when the ball is
        # released to out_img_dir
        self.tool = Tool(out_img_dir)
        # BUG FIX: the log handle only existed after process_all_video();
        # initialize it so process_video() fails clearly if called first.
        self.parse_log_file = None

    def process_video(self, video_path):
        '''
        Process one video, write the frame ID for when the ball was released
        to the log file.

        :param video_path: path to the video of a baseball pitch
        '''
        cap = cv2.VideoCapture(video_path)
        video_base_name = self.tool.get_base(video_path)
        # BUG FIX: frame_ball_release used to be referenced without being
        # initialized, raising NameError whenever no release frame was found.
        frame_ball_release = None
        # Process videos frame by frame, from the first to the last frame.
        frame_indices = np.arange(1, int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))
        for i in tqdm.tqdm(frame_indices):
            success, frame = cap.read()
            if not success:
                continue
            # Stop once the release frame is found; remember its index.
            if self.tool.find_ball(i, frame, video_base_name):
                frame_ball_release = i
                break
        # Save results to the log file.
        if frame_ball_release is None:
            self.parse_log_file.write(
                'Ball release not found for video {}\n'.format(video_base_name))
        else:
            self.parse_log_file.write(
                'Ball released at frame {} for video {}\n'.format(
                    frame_ball_release, video_base_name))
        self.parse_log_file.flush()

    def process_all_video(self, video_dir, parse_log):
        '''
        Process all videos in a directory.

        :param video_dir: Directory containing all videos to be processed
        :param parse_log: Log file that records the frame index where the
            ball was released
        '''
        self.parse_log_file = open(parse_log, "w")
        # Video paths in alphabetical order of their base names.
        video_paths = sorted(glob.glob(video_dir + "*.mp4"),
                             key=self.tool.get_base)
        for video_path in video_paths:
            self.process_video(video_path)
        self.parse_log_file.close()
def _process_ntc_query_imsi(self, message, address):
    """Answer an NTC IMSI query: reply with the IMSIs not found in the database.

    message: dict expected to carry a list of IMSIs under MsgKey.imsi
    address: (host, port) tuple of the requesting client
    Returns False on a malformed package; otherwise sends a response only
    when at least one IMSI was not found.
    """
    response_imsi_list = []
    recv_imsi_list = message.get(MsgKey.imsi)  # expected: list of IMSIs
    if recv_imsi_list is None:
        logger.error('package format error %s, from address %s:', str(message), str(address))
        return False
    # Look up every IMSI; collect the ones the database does not know.
    for imsi in recv_imsi_list:
        # parse_imsi maps an IMSI to its table name and store code.
        table_name, store_code = Tool.parse_imsi(imsi, mnc_2_tablename)
        try:
            row = self.database.query(table_name, store_code)
            if row:
                pass
            else:
                response_imsi_list.append(imsi)  # IMSIs not found in the DB
        except Exception as e:
            # NOTE(review): e.message is Python 2 only — confirm runtime version.
            logger.error(e.message)
    # If response_imsi_list is empty, no response is sent at all.
    if response_imsi_list:
        self._response(address, id=RspMsgIdValue.ntc_query_imsi_rep, imsilist=response_imsi_list)
def save_face():
    """Flask endpoint: verify the request token, store the uploaded face
    image under static/faces/<name>/ and register the user + face records.

    Returns 'Fail' on a bad token, {"code": 0} on success, and
    {"code": -1} on any error (missing field, IO error, DB error).
    """
    try:
        timestamp = request.form['timestamp']
        token = request.form['token']
        # Token must equal md5(TOKEN + timestamp) — rejects unauthenticated calls.
        if token != Tool.get_md5(app.config.get('TOKEN') + str(timestamp)):
            return 'Fail'
        name = request.form['name']
        desc = request.form['desc']
        ip = request.form['ip']
        cmd = request.form['cmd']
        face_img = request.files['face']
        # static/faces/<name> next to this source file.
        path = os.path.split(
            os.path.realpath(__file__)
        )[0] + os.path.sep + "static" + os.path.sep + "faces" + os.path.sep + name
        if not os.path.exists(path):
            os.mkdir(path)
        face_path = path + os.path.sep + face_img.filename
        face_img.save(face_path)
        user_id = FaceDao.add_user(
            User(username=name, ip=ip, cmd=cmd, desc=desc))
        # -1 signals a failed user insert; only then is no face row stored.
        if user_id != -1:
            face_relative_path = 'faces/' + name + '/' + face_img.filename
            FaceDao.add_face(
                Face(face=face_relative_path, time_point=time.time(), user_id=user_id))
        return json.dumps({"code": 0})
    except Exception as e:
        # Any failure is collapsed into a generic error code for the client.
        return json.dumps({"code": -1})
def test_output_files_count():
    #
    # 3. Assert output.files.count == 4
    #
    # FIX: removed the unused local `directories = []`.
    tool = Tool(INPUT_FILE_VALID)
    assert len(tool.files) == FILES_COUNT
def matchEndLLR(self,data):
    """Return True when *data* matches one of the configured "end" delimiters."""
    error_attributes = []
    warning_attributes = []
    debug_attributes = []
    attrs = self.dico_attributes["end"]
    #print "DEBUG DATA",data
    #print "DEBUG ATTRS:",attrs
    if Tool._is_array(attrs):
        # Several candidate end attributes: first match wins.
        for attr in attrs:
            attr_value_found = self.matchAttribute(data, attr, error_attributes, warning_attributes)
            #print "DEBUG attr_value_found:",attr_value_found
            if attr_value_found:
                break
    else:
        # NOTE(review): only this branch forwards debug_attributes — confirm
        # whether the loop above should pass it too.
        attr_value_found = self.matchAttribute(data, attrs, error_attributes, warning_attributes, debug_attributes)
    if attr_value_found:
        end_delimiter = attr_value_found
        #self.debug("End delimiter found:{:s}".format(Tool.removeNonAscii(end_delimiter)))
        result = True
    else:
        result = False
    return result
def test_input_file_page_count():
    #
    # 3. Assert exception if input.pdf.page.size != 6
    #
    parsed = Tool(INPUT_FILE_VALID)
    # Both the counter and the page list must agree with the expected count.
    assert parsed.pages_count == PAGES_COUNT
    assert len(parsed.pages) == PAGES_COUNT
def test_getNewsConfig_ios_newsListType(self):
    '''Verify the iOS event payload structure.'''
    result = requests.get(self.url, params=self.params).json()
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    self.assertTrue(Tool.check_type(result['data']['newsList'], dict))
def test_input_file_each_qrcode_is_valid_path():
    #
    # 5. Assert exception for each if input.file.qrcode != file path format
    #
    parsed = Tool(INPUT_FILE_VALID)
    # Every decoded QR code must name a known folder.
    for code in parsed.qrcodes:
        assert code in FOLDERS
def test_each_output_directories_fullpath_is_one_of_input_qrcode_value():
    #
    # 1. Assert for each output.directories.fullpath is one of input.qrcode.value[]
    #
    parsed = Tool(INPUT_FILE_VALID)
    for entry in parsed.files:
        assert entry.folder in FOLDERS
def test_daily_picList_type(self):
    '''Verify the daily picture payload structure.'''
    r = requests.get(self.url, params=self.params)
    result = r.json()
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    # CONSISTENCY: the sibling tests call check_type on the class
    # (Tool.check_type), not on a throwaway instance.
    self.assertTrue(Tool.check_type(result['data']['picList'], dict))
def test_new_daily_ios_success(self):
    '''New-color iOS daily request succeeds with exactly one picture.'''
    self.params['os_type'] = "Ios"
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['data']['picLength'], 1)
    self.assertEqual(result['errorCode'], -1)
def test_new_daily_ios_type(self):
    '''New-color iOS daily response must be a well-formed dict.'''
    self.params['os_type'] = "Ios"
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1)
    self.assertIsInstance(result, dict)
def _loadConfigMySQL(self): tool = Tool() #self.gen_dir = "result" try: # get generation directory #self.gen_dir = self.getOptions("Generation","dir") conf_synergy_dir = tool.getOptions("Apache","mysql_dir") self.mysql_exe = os.path.join(conf_synergy_dir, 'mysql.exe') except IOError as exception: print "Config reading failed:", exception try: print self.mysql_exe with open(self.mysql_exe): pass except IOError: print "mysql_exe not found." self.mysql_exe = False
def save(self):
    """Validate the form fields, persist the captured face image to
    faces/<name>/ and upload it; closes the dialog on success."""
    if self.face is None:
        return
    # FIX: four copy-pasted validation stanzas consolidated into one loop.
    # Each required field with the prompt shown on the save button when empty.
    required = (
        (self.le_name, '请输入名称'),
        (self.le_desc, '请输入欢迎语'),
        (self.le_ip, '请输入IP'),
        (self.le_cmd, '请输入指令'),
    )
    values = []
    for widget, prompt in required:
        value = widget.text()
        if value.strip(' ') == '':
            self.btn_save.setText(prompt)
            widget.setFocus()
            return
        values.append(value)
    name, desc, ip, cmd = values
    # Persist the face image under a per-user directory.
    if not os.path.exists('faces/{}'.format(name)):
        os.mkdir('faces/{}'.format(name))
    face_name = 'faces/{}/{}.png'.format(name, time.time())
    cv2.imwrite(face_name, self.face)
    # Upload; close on success, otherwise report the failure on the button.
    result = Tool.upload(name, desc, ip, cmd, face_name)
    if result:
        self.close()
    else:
        self.btn_save.setText('上传识别,请检查网络连接')
def __init__(self, start_time, end_time, case_data):
    """Record the run window and case data, and clear any stale report file."""
    self.start_time = start_time
    self.end_time = end_time
    self.case_data = case_data
    report_name = 'report_' + g_get('main') + '.html'
    self.report_html_path = base_dir() + '/runtime/html/' + report_name
    # Remove the previous report so a fresh one is generated.
    Tool().remove_file(self.report_html_path)
def __init__(self):
    """Build the shared helpers and lazily open mysql/redis/sphinx connections."""
    self.tool = Tool()
    self.link = Link()
    # Reuse connections when they are already established (class-level cache).
    if not self.db:
        self.db = self.link.connectMysql(db='maimai')
    if not self.r:
        self.r = self.link.connectRedis()
    if not self.sp:
        self.sp = self.link.connectSphinx()
    # The image host comes from the aliyun_oss section of the config file.
    parser = ConfigParser()
    parser.read('conf/config.ini')
    self.imageHost = parser.get('aliyun_oss', 'host')
def test_new_getDeeplinkPicConfig_requestsError(self):
    '''Missing event_id must yield the request-validation error (new color).'''
    del self.params['event_id']
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['error_code'], 666)
    self.assertEqual(result['error_msg'],
                     "path: event_id, error: event_id is required.")
def getProcInBody(self, data):
    """Return every test-procedure name (…​.bproc) referenced in *data*."""
    # Strip non-ASCII noise before searching for procedures.
    plain_data = Tool.removeNonAscii(data)
    proc_pattern = r'SHLVCP_[A-Z_]*_[0-9]{4}_PROC_?[0-9]{0,3}\.bproc'
    return re.findall(proc_pattern, plain_data)
def getObjectiveInBody(self, data):
    """Extract the text between 'Objective:' and 'Test diagram', or False."""
    plain_data = Tool.removeNonAscii(data)
    found = re.search(r'Objective:(.*)Test diagram', plain_data)
    return found.group(1) if found else False
def test_new_getDeeplinkPicConfig_ios_event_id(self):
    '''The returned event id must equal the requested one (new color, iOS).'''
    result = Tool.request_get_result(self.url, self.params)
    # Assertions
    self.assertEqual(result['errorCode'], -1,
                     msg="event_id = {}".format(self.params['event_id']))
    self.assertEqual(result['data']['eventId'], self.params['event_id'])
def getAtribute(dico, attr):
    """Return dico[attr] cleaned of non-ASCII characters and tabs,
    or the string "None" when the key is absent."""
    if attr not in dico:
        return "None"
    value = Tool.removeNonAscii(dico[attr])
    # Remove tabulation
    return re.sub(r"\t", r"", value)
def getUsedIOInBOdy(self, data):
    """Extract the text following 'Used I/O:', or False when absent."""
    plain_data = Tool.removeNonAscii(data)
    found = re.search(r'Used I/O:(.*)', plain_data)
    return found.group(1) if found else False
def listDir(self):
    """
    Recursive function to find files in directories.
    Treatment for Excel and Word file is different
    :return:
    """
    self.depth += 1
    # Build the current directory path from the base name plus the stack.
    new_concat_dirname = self.basename
    for dir in self.stack:
        new_concat_dirname = join(new_concat_dirname,dir)
    # Platform-specific trailing separator.
    if sys.platform.startswith('win32'):
        new_concat_dirname = "{:s}\\".format(new_concat_dirname)
    else:
        new_concat_dirname = "{:s}/".format(new_concat_dirname)
    try:
        isdir = os.path.isdir(new_concat_dirname)
        if isdir:
            list_dir = os.listdir(new_concat_dirname)
        else:
            # Plain file: treat it as a one-element listing.
            list_dir = [new_concat_dirname]
    except OSError as e:
        try:
            self.log("{:s}".format(str(e)))
        except UnicodeEncodeError as exception:
            pass
        list_dir = []
    for found_dir in list_dir:
        path_dir = os.path.join(new_concat_dirname, found_dir)
        isdir = os.path.isdir(path_dir)
        if isdir:
            # Recurse into sub-directories via the shared stack.
            self.stack.append(found_dir)
            self.listDir()
            self.stack.pop()
        else:
            # "~$" prefix marks Office lock/temp files to be skipped.
            void = re.sub(r"(~\$)(.*)\.(.*)",r"\1",found_dir)
            extension = Shlvcp.getFileExt(found_dir)
            type = Tool.getType(found_dir)
            if "doc" in extension and void != "~$":
                self.log("Parse {:s}".format(found_dir),gui_display=True)
                filename = join(new_concat_dirname,found_dir)
                self.func_ptr(filename)
            elif extension in ("bproc") and void != "~$":
                # NOTE(review): `extension in ("bproc")` is a substring test
                # on the string "bproc", not a one-element tuple — confirm.
                self.log("Parse {:s} type {:s}".format(found_dir,type),gui_display=True)
                filename = join(new_concat_dirname,found_dir)
                if self.enable_check_bproc:
                    dico = self.readProc(filename)
                    if not dico:
                        self.log("Reject {:s} as a generic".format(found_dir),gui_display=True)
                else:
                    print "Exclude BPROC procedure:",filename
            else:
                self.log("Discard {:s}".format(found_dir),gui_display=True)
                # Wrong Word format, only openxml
                text = "Unexpected format for {:s}, only ('doc','docx','docm','bproc') accepted".format(found_dir)
                self.log(text)
    self.depth -= 1
def matchAttribute(self, data, attr, error_attributes=None, warning_attributes=None, debug_attributes=None):
    """Match *attr* at the beginning of *data* and return its cleaned value.

    :param data: line of text to scan
    :param attr: attribute label, used literally inside the regex
    :param error_attributes: list extended with error messages
    :param warning_attributes: list extended with warning messages
    :param debug_attributes: list extended with informational messages
    :return: the filtered value, "EMPTY" when the matched value is empty,
             or False when *attr* does not match at all.

    BUG FIX: the three message lists were mutable default arguments, so
    messages silently accumulated across unrelated calls; they now default
    to fresh lists per call (callers passing their own lists are unaffected).
    """
    if error_attributes is None:
        error_attributes = []
    if warning_attributes is None:
        warning_attributes = []
    if debug_attributes is None:
        debug_attributes = []
    #print "Atribute selected:",attr,
    m = re.match("^\s*" + attr + "\s*(.*)", data)
    if m:
        attr_value_found = m.group(1)
        self.debug("Attributes found {:s} {:s}".format(attr,Tool.removeNonAscii(attr_value_found)))
        # Semi-colons are not allowed: replace with commas and report.
        m = re.search(';', attr_value_found)
        if m:
            value_filtered = re.sub(r";",r",",attr_value_found)
            debug_attributes.append("Unexpected semi-colon found in \"{:s}\" attribute.".format(attr))
        else:
            value_filtered = attr_value_found
        # Extra cleaning for traceability attributes.
        if attr == "Refers to:" or attr == "Constraint by:":
            # Strip tabs and spaces.
            char = {r'\t':'',r' ':''}
            for before, after in char.iteritems():
                value_filtered = re.sub(before,after,value_filtered)
            # Collapse doubled closing/opening brackets, reporting each.
            m = re.search(r'\]\]', value_filtered)
            if m:
                error_attributes.append("Double brackets in {:s} attribute.".format(attr))
                value_filtered = re.sub(r"\]\]",r"]",value_filtered)
            m = re.search(r'\[\[', value_filtered)
            if m:
                error_attributes.append("Double brackets in {:s} attribute.".format(attr))
                value_filtered = re.sub(r"\[\[",r"[",value_filtered)
            m = re.match(r'^\[(.*)\]', value_filtered)
            if m:
                inside_brackets = m.group(1)  # Between brackets
                # Bracket pairs without a separating comma.
                m = re.match(r'(.*)\] ?\[(.*)', inside_brackets)
                if m:
                    debug_attributes.append("Missing comma in \"{:s}\" attribute.".format(attr))
                    result = re.sub(r"\] ?\[",r"],[",value_filtered)
                else:
                    result = value_filtered
            else:
                result = value_filtered
        else:
            result = value_filtered
        if result == "":
            result = "EMPTY"
    else:
        result = False
    return result
def parseTable(self, start, end, list_tbl_tables=[]):
    """Collect every Word table between positions *start* and *end* into
    *list_tbl_tables*, keyed by table number, and return the table count.

    NOTE(review): list_tbl_tables is a mutable default and is assigned with
    integer keys (`list_tbl_tables[tables_counter] = ...`), which only works
    when callers pass a dict-like object — confirm call sites.
    """
    try:
        doc_range = self.doc.Range(start,end)
        nb_tables = doc_range.Tables.Count
        tables_counter = 1
        while tables_counter <= nb_tables:
            print "Process table:",tables_counter
            self.log("Process table:{:d}".format(tables_counter))
            tbl = doc_range.Tables(tables_counter)
            # COM tables use 1-based indexing, hence the +1 bounds.
            nb_rows = len(tbl.Rows) + 1
            #print "NB_ROWS",nb_rows
            nb_cols = len(tbl.Columns) + 1
            tbl_tables = []
            del(tbl_tables[:])
            header = True
            for row in range(1, nb_rows):
                line = []
                del(line[:])
                for col in range(1, nb_cols):
                    try:
                        txt = Tool.replaceNonASCII(tbl.Cell(row, col).Range.Text)
                        line.append(txt)
                    except:
                        #print "Warning, encounter joined cells."
                        self.log("Warning, encounter joined cells.")
                        pass # exception for joined cells
                if header:
                    # Log the header row once, pipe-separated.
                    str_line = "|".join(line)
                    self.log("Table found:{:s}".format(str_line))
                    header = False
                tbl_tables.append(line)
            #list_attributes["table"] = tbl_tables
            #print "inside tbl",tbl_tables
            # TODO: make applicable when more than one document is parsed
            if tables_counter in list_tbl_tables:
                # Already exists
                list_tbl_tables[tables_counter].extend(tbl_tables[:])
            else:
                # New table
                list_tbl_tables[tables_counter] = tbl_tables[:]
            tables_counter += 1
        else:
            pass
            #list_attributes["table"] = None
    except pythoncom.com_error as e:
        print "UN:",e
        #print "DEUX:",e.excepinfo[5]
        #print(win32api.FormatMessage(e.excepinfo[5]))
        #print "Treat:",start_delimiter,start,end
        #list_attributes["table"] = None
    # NOTE(review): nb_tables is unbound if the COM error fired before its
    # assignment — this return would then raise NameError; confirm.
    return nb_tables
def invert(self):
    """
    Get requirement versus file
    ex:
    Input: {'SSCS_ESSNESS_ET2788_S-6D1': [u'SSCS_ESSNESS_0001', etc.
    Output: {'SSCS_ESSNESS_9020': ['SSCS_ESSNESS_ET2788_S-6D1'],
    'SSCS_ESSNESS_9141': ['SSCS_ESSNESS_ET2788_S-6D1'],
    :return:
    """
    # Invert the dict-of-lists mapping (file -> requirements becomes
    # requirement -> files).
    inverted = Tool._invert_dol(self.tbl_file_llr)
    self.list_llr_vs_file = inverted
def getData(self,raw,key):
    """Flatten one review record from *raw* (indexed by (key, field) pairs)
    into a plain list, converting HTML bodies to plain text."""
    import html2text
    id = raw[key,"id"]
    # Context = scope + type + review id.
    context = raw[key,"scope"] + " " + raw[key,"type"] + " " + raw[key,"review_id"]
    description = raw[key,"Description"]
    #print "DESC_1:",description
    # Strip non-ASCII, then convert the HTML description to plain text.
    description_wo_nonascii = Tool.replaceNonASCII(description)
    #print "DESC_2:",description_wo_nonascii
    description_plain_txt = html2text.html2text(description_wo_nonascii)
    #print "DESC_3:",description_plain_txt
    impact = raw[key,"context"]
    criticality = raw[key,"criticality"]
    assignee = raw[key,"lname"]
    expected = raw[key,"date_expected"][0:10]  # keep only the date part
    status = raw[key,"status"]
    response = raw[key,"comment"]
    response_plain_txt = html2text.html2text(Tool.replaceNonASCII(response))
    tbl = [id,context,description_plain_txt,impact,criticality,assignee,expected,status,response_plain_txt]
    return tbl
def getDerived(self, type, refer, derived, found_dir, start_delimiter, key="derived"):
    """Check whether a requirement is flagged as derived.

    :param type: specification type, looked up in self.dico_specifications
    :param refer: the "Refers to" value, used to detect partially derived reqs
    :param derived: the derived-flag text found in the document
    :param found_dir: file name, used in the error report
    :param start_delimiter: requirement delimiter, used in the error report
    :param key: which expected-value list to read from the specification dict
    :return: True when *derived* matches one of the expected derived values
    """
    def isDerived(found,expected):
        # Case-insensitive equality test.
        found = found.upper()
        expected = expected.upper()
        #print "DERIVED",found,expected
        if found == expected:
            result = True
        else:
            result = False
        return result
    def testPartiallyDerived(refer):
        # A derived requirement that still references something is only
        # partially derived, which is reported as an error below.
        if refer in ("N/A","EMPTY","NO"):
            result = False
        else:
            result = True
        return result
    # Test derived requirements
    result = False
    #print "DERIVED TYPE",type
    #print "derived",derived
    if type in self.dico_specifications:
        expected_derived_list = self.dico_specifications[type][key]
        #print "expected_derived_list",expected_derived_list
        if Tool._is_array(expected_derived_list):
            # Several accepted spellings of the derived flag: first match wins.
            for expected_derived in expected_derived_list:
                result = isDerived(derived, expected_derived)
                if result:
                    break
        else:
            result = isDerived(derived, expected_derived_list)
        if result:
            if key == "derived":
                partially_derived = testPartiallyDerived(refer)
                if partially_derived:
                    # Derived requirements must not carry traceability.
                    self.dico_errors["derived","S_2",found_dir,start_delimiter,""] = ["Derived requirement with traceability."]
                    self.nb_error += 1
    return result
def __init__(self, ihm=None, **kwargs):
    """Initialise the change-request handler on top of Tool.

    :param ihm: optional GUI/IHM handle
    :param kwargs: free-form settings copied onto the instance; recognised
        keys include system/item (old-workflow lookup), detect, implemented
        and cr_domain.
    :return:
    """
    # Copy every keyword straight onto the instance.
    for key in kwargs:
        self.__dict__[key] = kwargs[key]
    self.ihm = ihm
    Tool.__init__(self)
    self._loadConfigSynergy()
    # The old CR workflow flag requires both system and item.
    if "system" in self.__dict__ and "item" in self.__dict__:
        self.old_cr_workflow = self.get_sys_item_old_workflow(self.__dict__["system"],self.__dict__["item"])
    else:
        self.old_cr_workflow = False
    if "detect" in self.__dict__:
        self.setDetectRelease(self.__dict__["detect"])
    if "implemented" in self.__dict__:
        self.setImplRelease(self.__dict__["implemented"])
    if "cr_domain" in self.__dict__:
        self.setDomain(self.__dict__["cr_domain"])
    self.list_change_requests = []
def readProc(self,filename):
    """Parse a .bproc XML procedure file and return its ExecutionInfo fields
    as a tag -> text dict. Generic procedures (per Shlvcp.findGenericBproc)
    are skipped and yield an empty dict."""
    dico = dict()
    m = Shlvcp.findGenericBproc(filename)
    if not m:
        # No generic procedures
        small_filename = Tool.getFileNameAlone(filename)
        self.list_proc.append(small_filename)
        tree = ET.parse(filename)
        root = tree.getroot()
        execution_info = root.find('ExecutionInfo')
        dico= dict()
        # Flatten the ExecutionInfo children into tag -> text.
        for element in execution_info:
            dico[element.tag]=element.text
        print "{:s} {:s} {:s}".format(dico["Tester"],dico["SanctionAuto"],dico["ExecutionDate"])
        self.dico_proc[small_filename] = dico
    return dico
def __init__(self):
    """Create the helper objects and open any missing mysql/redis/sphinx
    connections (class-level handles are reused when already set)."""
    self.tool = Tool()
    self.link = Link()
    if not self.db:
        self.db = self.link.connectMysql(db='maimai')
    if not self.r:
        self.r = self.link.connectRedis()
    if not self.sp:
        self.sp = self.link.connectSphinx()
    # Image host is read from the aliyun_oss section of the config file.
    conf = ConfigParser()
    conf.read('conf/config.ini')
    self.imageHost = conf.get('aliyun_oss', 'host')
def matchBegin(self, data, type):
    """Return the start delimiter "[<type>…]" found at the head of *data*,
    or False when *data* is empty/None or no delimiter matches."""
    # print "DEBUG DATA:",data
    if data in (None, 0):
        return False
    # \s : Matches any whitespace character like a blank space, tab, and the like.
    found = re.match(r'^\s*\[({:s}.*)\]'.format(type), data)
    if not found:
        return False
    start_delimiter = found.group(1)
    self.debug("Start delimiter found:{:s} beginning with {:s}".format(Tool.removeNonAscii(start_delimiter),type))
    return start_delimiter
def test_populate_components_listbox(self):
    # Exercises Tool.populate_components_listbox (with selection) and the
    # _wo_select variant against several component/project combinations,
    # printing each result for manual inspection.
    from tool import Tool
    fenetre = Tk()
    queue = Queue.Queue()
    docid.interface = Interface(queue,fenetre)
    componentslistbox = Listbox(docid.interface)
    tool = Tool()
    result = tool.populate_components_listbox(componentslistbox,1,"ESSNESS","Dassault F5X PDS")
    print "populate_components_listbox",result
    result = tool.populate_components_listbox(componentslistbox,1,"SDSIO","Dassault F5X SDS")
    print "populate_components_listbox",result
    result = tool.populate_components_listbox(componentslistbox,1,"WHCC","Dassault F5X WDS")
    print "populate_components_listbox",result
    # Variant without pre-selection, including empty component/project inputs.
    result = tool.populate_components_listbox_wo_select(componentslistbox,"ESSNESS","Dassault F5X PDS")
    print "populate_components_listbox_wo_select",result
    result = tool.populate_components_listbox_wo_select(componentslistbox,"","Dassault F5X PDS")
    print "populate_components_listbox_wo_select",result
    result = tool.populate_components_listbox_wo_select(componentslistbox,"","")
    print "populate_components_listbox_wo_select",result
def parse_end_req(self, tbl_req=[], # Input
                  tbl_output=[]): # Output
    """Scan the Word document for REQ_End styled paragraphs and pair each
    with the matching requirement from *tbl_req*.

    :param tbl_req: input list of (req_id, start_tag, end_tag) tuples
    :param tbl_output: output list extended with (req_id, end_tag, end_req_end)
    :return: the end position of the last range processed
    NOTE(review): both defaults are mutable and tbl_output doubles as an
    output parameter — callers are expected to always pass their own lists.
    """
    iter_list = iter(tbl_req)
    myRange = self.doc.Content
    sel = self.doc.Application.Selection
    found = self.find(myRange,style='REQ_End')
    start_first_req_part = myRange.Start
    if not found:
        print "Missing REQ_End style in document."
        print "Start:",myRange.Start
        print "End:",myRange.End
    req_id = "0"
    while found:
        error = False
        txt = myRange.Text
        #m = re.match(r'^\s*\[End Requirement\]',txt)
        m = self.matchEndLLR(txt)
        if m:
            # Style is coherent with tag text
            self.debug("Found REQ_end: {:s}".format(txt))
            start_req_end = myRange.Start
            end_req_end = myRange.End
        else:
            self.debug("Style is not coherent with tag text: {:s}".format(Tool.removeNonAscii(txt)))
            error = True
        try:
            req_id,start_tag,end_tag = iter_list.next()
            if not error:
                # Record the requirement together with its end-tag range.
                tbl_output.append((req_id,end_tag,end_req_end))
                #print "REQ:",req_id,start,end_req_end
                #print "TXT:",txt
            found = self.find_execute()
        except StopIteration:
            print "End iterations on requirement {:s}".format(req_id)
            # End of iteration
            break
    end_req_part = myRange.End
    return end_req_part
def parse(db, raw_recipe):
    """parsing recipe download into recipe structure"""
    raw_name, raw_ingredients, raw_directions = raw_recipe
    # Tokenize then POS-tag every direction sentence.
    tokenized_dirs = [nltk.word_tokenize(d) for d in raw_directions]
    tagged_directions = [nltk.pos_tag(d) for d in tokenized_dirs]
    ingredients = [Ingredient.parse(db, i) for i in raw_ingredients]
    directions = [Direction.parse(d, ingredients) for d in tagged_directions]
    methods = Method.find_methods(directions)
    # Flatten the token lists to scan for kitchen tools.
    flat_tokens = [word for d in tokenized_dirs for word in d]
    tools = Tool.find_tools(flat_tokens)
    return Recipe(raw_name, ingredients, tools, methods, directions)
def createCCB(self, list_projects, cr_domain, list_action_items, cr_with_parent, dico,
              list_cr_for_ccb, # User selection list from _getListCRForCCB
              status_list, # User selection availability flag _getListCRForCCB
              ccb_time=False,
              dico_former_cr_status_list={},  # NOTE(review): mutable default, shared across calls — verify no caller relies on it
              tableau_pr_unsorted=[],         # NOTE(review): mutable default, shared across calls
              found_cr=False,
              ccb_time_obj="",
              **kwargs):
    """
    Create the CCB minutes document based on the template:
    - open template docx
    - get sections of the template
    - replace tag in document
    - create zip
      . copy unmodified section
      . copy modified section

    Returns (docx_filename, exception) as produced by _createDico2Word.
    """
    # Any extra keyword argument is absorbed directly as an attribute of self.
    for key in kwargs:
        self.__dict__[key] = kwargs[key]
    # Fall back to dico["system"], then "Default", when no system was given.
    if "system" not in self.__dict__:
        if "system" in dico:
            self.system = dico["system"]
        else:
            self.system = "Default"
            print "Missing system name"
    # Configuration flag can force the "with parent CR" table layout.
    if self.ccb_cr_parent == "yes":
        cr_with_parent = True
    name,mail,tel,service,qams_user_id = self.get_user_infos(dico["login"])
    if dico["author"] in ("","Nobody"):
        dico["author"] = Tool.replaceNonASCII(name)
    #self.old_cr_workflow = self.get_sys_item_old_workflow(dico["system"],
    #                                                      dico["item"])
    #self.setDetectRelease(dico["detect"])
    #self.setImplRelease(dico["implemented"])
    #self.ccb_type = cr_domain
    # NOTE(review): the cr_domain parameter is overwritten here — the passed
    # value is effectively ignored; confirm this is intentional.
    cr_domain = self.getDomain()
    if self._is_array(cr_domain):
        list_cr_domain_str = ",".join(cr_domain)
    else:
        list_cr_domain_str = cr_domain
    self.setListCR(list_cr_for_ccb,
                   status_list)
    # CR list created based on list self.tableau_pr
    #tableau_pr_unsorted,found_cr = self.getPR_CCB(cr_with_parent=cr_with_parent,
    #                                              cr_type=dico["cr_type"])
    # Time capsule: when a former-status snapshot is supplied, each CR row's
    # current status (index 3) is replaced by its status at that past date.
    dico_time_capsule = {}
    if found_cr:
        for cr in tableau_pr_unsorted:
            # CR ids are zero-padded ("0001"); strip for dictionary lookup.
            cr_id = cr[2].lstrip('0')
            current_cr_status = cr[3]
            if cr_id in dico_former_cr_status_list:
                # update status with former status in the past
                former_cr_status = dico_former_cr_status_list[cr_id]
                dico_time_capsule[cr_id] = {"current":current_cr_status,
                                            "former":former_cr_status}
                cr[3] = former_cr_status
                print "dico_time_capsule",dico_time_capsule
                print "CR",cr
    # Sort CR according to ID, status or severity column;
    # by default CR are sorted by severity.
    if found_cr:
        if self.ccb_cr_sort == "":
            tableau_pr_sorted = sorted(tableau_pr_unsorted,key=self._getSeverity)
        else:
            if self.ccb_cr_sort == "id":
                tableau_pr_sorted = sorted(tableau_pr_unsorted,key=lambda x: x[2])
            elif self.ccb_cr_sort == "status":
                tableau_pr_sorted = sorted(tableau_pr_unsorted,key=lambda x: x[3])
            elif self.ccb_cr_sort == "severity":
                tableau_pr_sorted = sorted(tableau_pr_unsorted,key=self._getSeverity)
            else:
                tableau_pr_sorted = tableau_pr_unsorted
    else:
        tableau_pr_sorted = tableau_pr_unsorted
    # Checklist: per-CR checklist dictionary (domain tag differs for SW).
    list_candidate_cr=[]
    if found_cr:
        self.tableau_pr = tableau_pr_sorted
        # Dictionary containing checklist for each CR, not sorted.
        if self.isSwDomain():
            cr_domain = "SCR"
        else:
            cr_domain = "CR"
        dico_cr_checklist = self.createChecklist(cr_domain,
                                                 timeline=dico_time_capsule,
                                                 list_candidate_cr=list_candidate_cr)
    else:
        dico_cr_checklist ={'domain':'SCR'}
    # Build the CR table (header row + data rows) and the annex entries.
    tableau_pr= []
    list_cr_annex = []
    if self.isSwDomain():
        # Software domain
        tableau_pr.append(["Domain","CR Type","ID","Status","Synopsis","Severity"])
        if found_cr:
            # Annex: label candidate CR extracts a), b), ... z), then aa), ab), ...
            num_begin = ord("a")
            num_end = ord("z")
            num = num_begin
            prefix = ""
            print "list_candidate_cr",list_candidate_cr
            for cr_domain,cr_type,cr_id,cr_status,cr_synopsis,cr_severity in tableau_pr_sorted:
                # Patch
                # cr_id: 0001 etc.
                if cr_id in list_candidate_cr:
                    line = "{:s}{:s}) Extract {:s} {:s}".format(prefix,chr(num),cr_domain,cr_id)
                    print "LINE",line
                    num += 1
                    if num > num_end:
                        # Letters exhausted: extend the label with an "a" prefix.
                        prefix += "a"
                        num = num_begin
                    list_cr_annex.append((line,'rb'))
                    list_cr_annex.append(('','r'))
            tableau_pr.extend(tableau_pr_sorted)
        else:
            tableau_pr.append(["-","-","-","-","-","-"])
    elif cr_with_parent:
        tableau_pr.append(["Domain","CR Type","ID","Status","Synopsis","Severity","Detected on","Implemented for","Parent CR","SW impact","HW impact","PLD impact"])
        if not found_cr:
            tableau_pr.append(["-","-","-","-","-","-","-","-","-","-","-","-"])
        else:
            tableau_pr.extend(tableau_pr_sorted)
    else:
        tableau_pr.append(["Domain","CR Type","ID","Status","Synopsis","Severity","Detected on","Implemented for","SW impact","HW impact","PLD impact"])
        if not found_cr:
            tableau_pr.append(["-","-","-","-","-","-","-","-","-","-","-"])
        else:
            tableau_pr.extend(tableau_pr_sorted)
    tableau_log = [["id","Log"],["--","--"]]
    # Action_items
    # Previous actions
    tbl_previous_actions = self.createTblPreviousActionsList(list_action_items,ccb_time)
    # Current actions
    tbl_current_actions = self.createTblActionsList(list_action_items,ccb_time)
    template_type = "CCB"
    item_description = self.getItemDescription(dico["item"])
    ci_identification = self.get_ci_sys_item_identification(dico["system"],
                                                            dico["item"])
    # Title/subject depend on how specific the selection is (component > item > system).
    if dico["component"] != "":
        title = "{:s} {:s} {:s} {:s}".format(self.system,dico["item"],dico["component"],template_type)
        subject = "{:s} {:s} {:s} {:s}".format(self.system,dico["item"],dico["component"],self.getTypeDocDescription(template_type))
    elif dico["item"] != "":
        title = "{:s} {:s} {:s}".format(self.system,dico["item"],template_type)
        subject = "{:s} {:s} {:s}".format(self.system,dico["item"],self.getTypeDocDescription(template_type))
    else:
        title = "{:s} {:s}".format(self.system,template_type)
        subject = "{:s} {:s}".format(self.system,self.getTypeDocDescription(template_type))
    project_text = "The project is not defined"
    if dico["project"] != "":
        if len(list_projects) in (0,1) :
            project_text = "The project is {:s}".format(dico["project"])
        else:
            text = "The projects are: "
            project_text = text + ", ".join(map(str, list_projects))
    # Build a default reference from the most specific id when none was given.
    if dico["reference"] == "":
        if dico["component"] != "":
            tag_id = dico["component"]
        elif dico["item"] != "":
            tag_id = dico["item"]
        else:
            tag_id = dico["system"]
        reference = "CCB_Minutes_{:s}_001".format(tag_id)
    else:
        reference = dico["reference"]
    if self.isSwDomain():
        # Software
        template_name = self._getTemplate("CCB")
        #if not cr_with_parent:
        colw_pr = [500,   # Domain
                   500,   # CR Type
                   500,   # ID
                   500,   # Status
                   2500,  # Synopsis
                   500]   # Severity -- total 5000 = 100%
        if 0==1:  # NOTE(review): dead branch, kept as found
            colw_pr = [300, # Domain
                       300, # CR Type
                       300, # ID
                       500, # Status
                       2000, # Synopsis
                       400, # Severity
                       400, # Detected on
                       400, # Implemented for
                       400, # Parent CR
                       300] # 5000 = 100%
    else:
        # Hardware
        template_name = self._getTemplate("CCB_PLD","CCB_Minutes_HW_PLD_template.docx")
        if not cr_with_parent:
            colw_pr = [300, # Domain
                       300, # CR Type
                       300, # ID
                       500, # Status
                       2000, # Synopsis
                       400, # Severity
                       400, # Detected on
                       400, # Implemented for
                       300, # SW impact
                       300, # HW impact
                       300] # PLD impact 5000 = 100%
        else:
            colw_pr = [300, # Domain
                       300, # CR Type
                       300, # ID
                       500, # Status
                       2000, # Synopsis
                       400, # Severity
                       400, # Detected on
                       400, # Implemented for
                       400, # Parent CR
                       300, # SW impact
                       300, # HW impact
                       300] # PLD impact 5000 = 100%
    # Table formats: widths are in fiftieths of a percent (5000 = 100%).
    fmt_pr = {
        'heading': True,
        'colw': colw_pr, # 5000 = 100%
        'cwunit': 'pct',
        'tblw': 5000,
        'twunit': 'pct',
        'borders': {'all': {'color': 'auto','space': 0,'sz': 6,'val': 'single',}}
    }
    fmt_actions = {
        'heading': True,
        'colw': self.colw_actions, # 5000 = 100%
        'cwunit': 'pct',
        'tblw': 5000,
        'twunit': 'pct',
        'borders': {'all': {'color': 'auto','space': 0,'sz': 6,'val': 'single',}}
    }
    colw_log = [500,4500] # 5000 = 100%
    fmt_log = {
        'heading': True,
        'colw': colw_log, # 5000 = 100%
        'cwunit': 'pct',
        'tblw': 5000,
        'twunit': 'pct',
        'borders': {'all': {'color': 'auto','space': 0,'sz': 6,'val': 'single',}}
    }
    if dico["issue"] == "":
        issue = "1"
    else:
        issue = dico["issue"]
    # Meeting date: explicit object wins, otherwise today's date.
    if ccb_time_obj:
        #print "ccb_time_obj",ccb_time_obj
        #ccb_time = datetime.strftime("%d %b %Y",ccb_time_obj)
        #t = datetime(ccb_time_obj)
        ccb_time = ccb_time_obj
    else:
        ccb_time = time.strftime("%d %b %Y", time.localtime())
    # Mapping of template tags to their replacement values and formats.
    list_tags = {
        'SUBJECT':{'type':'str','text':subject,'fmt':{}},
        'TITLE':{'type':'str','text':title,'fmt':{}},
        'CI_ID':{'type':'str','text':ci_identification,'fmt':{}},
        'REFERENCE':{'type':'str','text':reference,'fmt':{}},
        'ISSUE':{'type':'str','text':issue,'fmt':{}},
        'ITEM':{'type':'str','text':dico["item"],'fmt':{}},
        'ITEM_DESCRIPTION':{'type':'str','text':item_description,'fmt':{}},
        'DATE':{'type':'str','text':time.strftime("%d %b %Y", time.localtime()),'fmt':{}},
        'DATE_MEET':{'type':'str','text':ccb_time,'fmt':{}},
        'PROJECT':{'type':'str','text':project_text,'fmt':{}},
        'RELEASE':{'type':'str','text':dico["release"],'fmt':{}},
        'BASELINE':{'type':'str','text':dico["baseline"],'fmt':{}},
        'DOMAIN':{'type':'str','text':list_cr_domain_str,'fmt':{}},
        'WRITER':{'type':'str','text':dico["author"],'fmt':{}},
        'MAIL':{'type':'str','text':mail,'fmt':{}},
        'TEL':{'type':'str','text':tel,'fmt':{}},
        'SERVICE':{'type':'str','text':service,'fmt':{}},
        'COPIES':{'type':'str','text':"Nobody",'fmt':{}},
        'MISSING':{'type':'str','text':"Nobody",'fmt':{}},
        'TABLECHECKLIST':{'type':'mix','text':dico_cr_checklist,'fmt':self.fmt_chk},
        'TABLEPRS':{'type':'tab','text':tableau_pr,'fmt':fmt_pr},
        'PREVIOUS_ACTIONS':{'type':'tab','text':tbl_previous_actions,'fmt':fmt_actions},
        'CURRENT_ACTIONS':{'type':'tab','text':tbl_current_actions,'fmt':fmt_actions},
        'TABLELOGS':{'type':'tab','text':tableau_log,'fmt':fmt_log},
        'TABLEANNEX':{'type':'par','text':list_cr_annex,'fmt':{}}
    }
    #for pr in tableau_pr:
    #    print "PR:",pr
    #    print "LEN:",len(pr)
    #print "FMT:",fmt
    # Output filename is made unique with a float timestamp suffix.
    if dico["item"] != "":
        docx_filename = dico["system"] + "_" + dico["item"] + "_CR_" + template_type + "_Minutes_" + dico["reference"] + "_%f" % time.time() + ".docx"
    else:
        docx_filename = dico["system"] + "_CR_" + template_type + "_Minutes_" + dico["reference"] + "_%f" % time.time() + ".docx"
    self.ihm.docx_filename = docx_filename
    self.docx_filename,exception = self._createDico2Word(list_tags,
                                                         template_name,
                                                         docx_filename)
    return self.docx_filename,exception
def similarity_analyze(words, file_name):
    """Write *words* to *file_name* as wakati text and return the
    similarities found by the word2vec model."""
    # The word2vec model requires that the wakati file has a space after
    # the final token (before changing the line).
    wakati_text = " ".join(words) + " "
    Tool.generate_file(wakati_text, file_name)
    return NlpKit.find_similarities(words, file_name)
def __init__(self, binary):
    """Initialize a 'Compile' tool wrapping *binary*."""
    # Delegate to the Tool base initializer with the fixed tool name.
    Tool.__init__(self, 'Compile', binary)
except AttributeError, e: # We might be running from RobotFramework from robot.libraries.BuiltIn import BuiltIn Config.imageBaseline = "%s/baseline" % os.path.dirname(BuiltIn().replace_variables('${SUITE SOURCE}')) # Cleanup previous runs try: shutil.rmtree(Config.resultDir) # Delete results directory except OSError, e: pass # Directory must have already been deleted # Create result directory structure os.makedirs(Config.resultDir + Config.resultAssetDir) # setup dest directory for tools Tool.setDestDir(Config.resultDir + Config.resultAssetDir) ## Inject Dependencies EntityLoggerProxy.setLogger(Logger()) EntityLoggerProxy.setFormatter(Formatter) Config.setLogger(EntityLoggerProxy) Config.setScreenshotLoggingLevel(INFO) SikuliFrameworkException.setConfig(Config) SikuliFrameworkException.setLogger(EntityLoggerProxy) Entity.setLogger(EntityLoggerProxy) Entity.setRegionFinderStrategy(Finder) Entity.setMultiResultProxyStrategy(MultiResultProxy) Entity.setSearcherStrategy(Searcher)
class TestToolMethods(unittest.TestCase):
    """Unit tests for the Tool helper class (md5, config, date formatting,
    unicode handling and JSON response builders)."""

    def setUp(self):
        # Fresh Tool instance for each test (redundant trailing pass removed).
        self.c = Tool()

    def tearDown(self):
        pass

    def test_md5(self):
        # md5 is called with a non-string (int) and must return a hex digest.
        string = 101212
        self.assertEqual(self.c.md5(string), '4601f3ffaf1aa7c525b3d9f5a820ca80')

    def test_getMysqlConfig(self):
        # The MySQL config is a dict carrying the standard connection keys.
        dbconfig = self.c.getMysqlConfig()
        self.assertEqual(type(dbconfig), dict)
        self.assertTrue('host' in dbconfig)
        self.assertTrue('user' in dbconfig)
        self.assertTrue('passwd' in dbconfig)
        self.assertTrue('db' in dbconfig)

    def test_formatDate(self):
        # Dates with various separators and locale text normalize to YYYY-MM-DD.
        str1 = u'2016/04/30 00:00:12'
        str2 = u'test 2016-04-30 00:00:12'
        str3 = u'sm2016\u5e7404\u670830\u65e5 00:00:12'
        distDate = u'2016-04-30'
        self.assertEqual(self.c.formatDate(str1), distDate)
        self.assertEqual(self.c.formatDate(str2), distDate)
        self.assertEqual(self.c.formatDate(str3), distDate)
        # Custom separator variant.
        # NOTE(review): the original defined an unused str4 (identical to str3)
        # right before this assertion — possibly meant to be passed here
        # instead of str1; behavior kept as-is, unused local removed.
        self.assertEqual(self.c.formatDate(str1, ' '), '2016 04 30')
        # Unparseable input falls back to the zero date.
        str5 = u'fdsjkkj20kaj3sjx-3'
        self.assertEqual(self.c.formatDate(str5), '0000-00-00')

    def test_convertSpecialUnicode(self):
        # Literal "\u002D" escape sequences decode to the hyphen character.
        oldStr = "\\u002Dtest"
        self.assertEqual(self.c.convertSpecialUnicode(oldStr), '-test')

    def test_filterEmoji(self):
        # Surrogate-pair emoji are each replaced by '??'.
        str1 = u"This is a smiley \uD83C\uDFA6 face \uD860\uDD5D \uD860\uDE07 \uD860\uDEE2 \uD863\uDCCA \uD863\uDCCD \uD863\uDCD2 \uD867\uDD98"
        str1_true = u'This is a smiley ?? face ?? ?? ?? ?? ?? ?? ??'
        self.assertEqual(self.c.filterEmoji(str1), str1_true)

    def test_responseSuccess(self):
        # Success responses serialize to {'code': 200, 'msg': 'ok', 'data': ...}.
        data = {'test_id': 101212}
        data_true = {
            'code': 200,
            'msg': 'ok',
            'data': data
        }
        self.assertEqual(json.loads(self.c.responseSuccess(data)), data_true)

    def test_responseError(self):
        # Error responses echo the supplied code, message and payload.
        data = {'test_id': 101212}
        data_true = {
            'code': 500,
            'msg': 'error',
            'data': data
        }
        self.assertEqual(json.loads(self.c.responseError(code = 500, msg = 'error', data = data)), data_true)
def setUp(self):
    """Create a fresh Tool instance before each test."""
    # Redundant trailing `pass` removed — the assignment is the method body.
    self.c = Tool()