from http import cookiejar
from urllib import parse, request


def spide_for_lightoj(user_list):
    # Log in first; LightOJ only serves the user list to an authenticated session.
    data = parse.urlencode(post_data).encode('utf-8')
    cookie = cookiejar.CookieJar()
    opener = request.build_opener(request.HTTPCookieProcessor(cookie))
    request.install_opener(opener)
    try:
        req = request.Request(url=lightoj_login_url, data=data, headers=headers)
        request.urlopen(req)
    except Exception:
        return user_list
    page_tot = page_count()
    if page_tot == get_data_failed:
        return
    # The user list is paginated, 50 users per page.
    for page in range(page_tot):
        url = lightoj_userlist_url + str(page * 50 + 1)
        data = decompress(spide(url))
        if data is None:
            continue
        data = data.decode('utf-8')
        user_one = match(lightoj_tr_data_one_pattern, data)
        user_two = match(lightoj_tr_data_two_pattern, data)
        get(user_list, user_one, lightoj_user_data_one_pattern, lightoj_td_data_one_pattern)
        get(user_list, user_two, lightoj_user_data_two_pattern, lightoj_td_data_two_pattern)
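# The spide_for_* scrapers in this file share a handful of helpers and module
# globals (post_data, headers, the *_url and *_pattern constants) defined
# elsewhere. A minimal sketch of the assumed helpers, inferred from the call
# sites; this is not the original implementation:
import gzip
import re

get_data_failed = -1    # assumed sentinel: the HTTP fetch failed
match_user_failed = -2  # assumed sentinel: the page loaded but the user was absent


def spide(url):
    # Fetch a URL and return the raw (possibly gzip-compressed) body, or None on error.
    try:
        return request.urlopen(request.Request(url, headers=headers)).read()
    except Exception:
        return None


def decompress(data):
    # Gunzip when needed; pass plain bodies (and fetch failures) straight through.
    if data is None:
        return None
    try:
        return gzip.decompress(data)
    except OSError:
        return data


def match(pattern, text):
    # Thin re.findall wrapper used by all the regex scraping below.
    return re.findall(pattern, text)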
def check_entry(self, first_line_checked, entry_lines):
    if not entry_lines:
        return
    # An entry needs a bug number unless it is marked "Unreviewed" or is a build fix.
    for line in entry_lines:
        if parse_bug_id_from_changelog(line):
            break
        if searchIgnorecase("Unreviewed", line):
            break
        if searchIgnorecase("build", line) and searchIgnorecase("fix", line):
            break
    else:
        self.handle_style_error(first_line_checked, "changelog/bugnumber", 5,
                                "ChangeLog entry has no bug number")
    # Check file change descriptions for style violations.
    line_no = first_line_checked - 1
    for line in entry_lines:
        line_no += 1
        # Filter file change descriptions.
        if not match(r'\s*\*\s', line):
            continue
        if search(r':\s*$', line) or search(r':\s', line):
            continue
        self.handle_style_error(line_no, "changelog/filechangedescriptionwhitespace", 5,
                                "Need whitespace between colon and description")
    # Check for a lingering "No new tests. (OOPS!)" left over from prepare-ChangeLog.
    line_no = first_line_checked - 1
    for line in entry_lines:
        line_no += 1
        if match(r'\s*No new tests\. \(OOPS!\)$', line):
            self.handle_style_error(line_no, "changelog/nonewtests", 5,
                                    "You should remove the 'No new tests' and either add and list tests, or explain why no new tests were possible.")
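# An illustrative entry that passes all three checks above (the file name and
# bug URL are made up):
#
#     2024-01-01  Jane Doe  <jane@example.org>
#
#             Fix frame flattening
#             https://bugs.webkit.org/show_bug.cgi?id=12345
#
#             * page/FrameView.cpp: Updated the layout logic.
#
# Dropping the bug URL (without "Unreviewed" or build-fix wording) triggers
# changelog/bugnumber; writing "* page/FrameView.cpp:Updated ..." with no space
# after the colon triggers changelog/filechangedescriptionwhitespace.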
def get(user_list, user, user_name_pattern, ac_number_pattern):
    for item in user:
        username = match(user_name_pattern, item)[0].strip()
        ac_number = match(ac_number_pattern, item)
        for i in range(2, len(ac_number), 4):
            print('get', username, 'AC:', ac_number[i].strip())
            user_list.append((username, ac_number[i].strip()))
def search(self, i):
    cur = self.db.cursor()
    persons_per_page = 20
    offset = (i["page"] - 1) * persons_per_page
    # Every filter defaults to the no-op clause '1=1' so unused fields drop out
    # of the WHERE without special-casing.
    firstName_str = lastName_str = sexe_str = profession_str = address_str = \
        email_str = phone_str = birthCity_str = birthday_str = personId_str = '1=1'
    if i["name"]:
        firstName = match(i["name"], "(([A-Z]? *)*[a-z]+)*")
        lastName = match(i["name"], "[A-Z]{2}[A-Z ]*")
        if firstName:
            firstName_str = "firstName SOUNDS LIKE '%s'" % ('%' + firstName + '%')
        if lastName:
            lastName_str = "lastName SOUNDS LIKE '%s'" % ('%' + lastName + '%')
    if i["sexe"] == 0 or i["sexe"] == 1:
        sexe_str = 'sexe = %s' % i["sexe"]
    if i["profession"]:
        profession_str = 'profession LIKE %s' % ("'%" + i["profession"] + "%'")
    if i['personId']:
        personId_str = "id LIKE '%s'" % (str(i['personId']) + '%')
    if i["address"]:
        address_str = 'address LIKE %s' % ("'%" + i["address"] + "%'")
    if i["email"]:
        email_str = 'email LIKE %s' % ("'%" + i["email"] + "%'")
    if i["phone"]:
        phone_str = 'phone LIKE %s' % ("'%" + i["phone"] + "%'")
    if i["birthCity"]:
        birthCity_str = 'birthCity LIKE %s' % ("'%" + i["birthCity"] + "%'")
    if i["birthday"]:
        birthday_str = 'birthday LIKE %s' % ("'%" + i["birthday"] + "%'")
    query = """SELECT firstName, lastName, id as personId FROM persons
               WHERE %s AND %s AND %s AND %s AND %s AND %s AND %s AND %s AND %s AND %s
               LIMIT %s, %s""" % (firstName_str, lastName_str, sexe_str,
                                  profession_str, personId_str, address_str,
                                  email_str, phone_str, birthCity_str,
                                  birthday_str, offset, persons_per_page)
    cur.execute(query)
    self.log("9", "")
    rows = cur.fetchall()
    images = ''
    for row in rows:
        images += self.get_default_face(row["personId"], (48, 48))
        images += "<smoky-image>"
    return str_dict(rows), images
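# For i = {"page": 2, "sexe": 1, "profession": "smith"} with every other field
# empty, the generated SQL reads roughly (illustrative):
#
#     SELECT firstName, lastName, id as personId FROM persons
#     WHERE 1=1 AND 1=1 AND sexe = 1 AND profession LIKE '%smith%'
#       AND 1=1 AND 1=1 AND 1=1 AND 1=1 AND 1=1 AND 1=1
#     LIMIT 20, 20
#
# The fragments are interpolated directly into the query string, so callers
# must sanitize every field themselves or the method is open to SQL injection.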
def spide_for_sgu(username):
    data = decompress(spide(sgu_url + username))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    table_data = match(sgu_table_pattern, data)
    if table_data is None or len(table_data) < 1:
        return match_user_failed
    # The solved count sits at a fixed row (index 17) of the profile table.
    tr_data = match(sgu_tr_pattern, table_data[0])[17]
    return match(sgu_ac_number_pattern, tr_data)[0]
def spide_for_acdream(username):
    data = decompress(spide(acdream_url + username))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    ul_data = match(acdream_ul_pattern, data)
    if ul_data is None or len(ul_data) < 1:
        return match_user_failed
    return match(acdream_ac_number_pattern, ul_data[0])[0]
def _check_list_order(self, lines):
    last_line = None
    line_number = 0
    for line in lines:
        # Lines with variable references cannot be ordered reliably; skip them
        # (skipped lines do not advance line_number).
        if search(r'\$\{.*\}', line):
            continue
        line_number += 1
        line = line.strip()
        if last_line is None:
            matched = match(r'(set\(|list\((APPEND|REMOVE_ITEM) )(?P<name>\w+)(?P<item>\s+\w+)?$', line)
            if matched:
                # FIXME: Add handling for include directories.
                if 'INCLUDE_DIRECTORIES' in matched.group('name'):
                    continue
                empty_lines_count = 0
                last_line = ''
                if matched.group('item'):
                    msg = 'First listitem "%s" should be in a new line.' % matched.group('item').strip()
                    self._handle_style_error(line_number, 'list/parentheses', 5, msg)
        else:
            matched = match(r'(?P<item>.+)?\)$', line)
            if matched:
                last_line = None
                if matched.group('item'):
                    msg = 'The parentheses after the last listitem "%s" should be in a new line.' % matched.group('item').strip()
                    self._handle_style_error(line_number, 'list/parentheses', 5, msg)
            elif line == '':
                empty_lines_count += 1
            else:
                last_line_path = self._list_item_path(last_line)
                line_path = self._list_item_path(line)
                if line == last_line:
                    msg = 'The item "%s" should be added only once to the list.' % line
                    self._handle_style_error(line_number, 'list/duplicate', 5, msg)
                elif line_path < last_line_path or line_path == last_line_path and line < last_line:
                    msg = 'Alphabetical sorting problem. "%s" should be before "%s".' % (line, last_line)
                    self._handle_style_error(line_number, 'list/order', 5, msg)
                elif last_line != '':
                    if line_path != last_line_path:
                        if empty_lines_count != 1:
                            msg = 'There should be exactly one empty line instead of %d between "%s" and "%s".' % (empty_lines_count, last_line, line)
                            self._handle_style_error(line_number, 'list/emptyline', 5, msg)
                    elif empty_lines_count != 0:
                        msg = 'There should be no empty line between "%s" and "%s".' % (last_line, line)
                        self._handle_style_error(line_number, 'list/emptyline', 5, msg)
                last_line = line
                empty_lines_count = 0
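# What the checker expects, illustrated on a made-up CMake list:
#
#     set(WebCore_SOURCES
#         dom/Attr.cpp
#         dom/Element.cpp
#
#         page/Frame.cpp
#     )
#
# Swapping the two dom/ entries would raise list/order, repeating one raises
# list/duplicate, and removing the blank line before the page/ group (or adding
# a second one) raises list/emptyline.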
def find_user(user_list, page_url):
    data = decompress(spide(page_url))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    td_data = match(noj_td_pattern, data)
    # Each user spans six <td> cells: the name is at index i, the AC count at i + 3.
    for i in range(2, len(td_data), 6):
        cur_username = match(noj_username_pattern, td_data[i])[0]
        print('get', cur_username)
        if int(td_data[i + 3]) == 0:
            # A zero AC count ends the crawl.
            return False
        user_list.append((cur_username, td_data[i + 3]))
    return True
def spide_for_hdu(username):
    data = decompress(spide(hdu_url + username))
    if data is None:
        return get_data_failed
    # HDU serves its pages in GB2312 rather than UTF-8.
    data = data.decode('gb2312')
    table_data = match(hdu_table_pattern, data)
    if table_data is None:
        return match_user_failed
    td_data = match(hdu_td_pattern, table_data[0])
    # Six <td> cells per search hit: the name at index i, the AC count at i + 3.
    for i in range(2, len(td_data), 6):
        if td_data[i] == username:
            return td_data[i + 3]
    return match_user_failed
def spide_for_zoj(username):
    # ZOJ profiles live behind a per-user URL that has to be discovered first.
    url = get_user_url(username)
    if url == get_data_failed:
        return get_data_failed
    if url == match_user_failed:
        return match_user_failed
    data = decompress(spide(url.format(username)))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    div_data = match(zoj_div_pattern, data)[0]
    # The stats read "solved/submitted"; keep only the part before the slash.
    ac_number = match(zoj_ac_pattern, div_data)[0]
    return ac_number[:ac_number.index('/')]
def page_count():
    data = decompress(spide(lightoj_userlist_url))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    page = match(lightoj_page_count_pattern, data)[-1]
    return int(page.strip())
def spide_for_ural(username):
    data = decompress(spide(ural_url + username))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    table_data = match(ural_table_pattern, data)
    if table_data is None:
        return match_user_failed
    # Drop the header row, then scan the ranking table for the user.
    tr_data = match(ural_tr_pattern, table_data[0])[1:]
    for item in tr_data:
        cur_username = match(ural_user_pattern, item)[0]
        if cur_username == username:
            return match(ural_ac_number_pattern, item)[2]
    return match_user_failed
def get_page_count(url):
    data = decompress(spide(url + '1'))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    page_info = match(noj_page_count_pattern, data)[0]
    return int(page_info[page_info.index('=') + 1:])
def __init__(self, table_id=None, match=None, duration_sec=None,
             duration_nsec=None, priority=None, idle_timeout=None,
             hard_timeout=None, cookie=None, packet_count=None,
             byte_count=None, actions=None):
    # Mutable defaults (common.match(), []) must not live in the signature,
    # hence the None-or-default dance for every field.
    self.table_id = table_id if table_id is not None else 0
    self.match = match if match is not None else common.match()
    self.duration_sec = duration_sec if duration_sec is not None else 0
    self.duration_nsec = duration_nsec if duration_nsec is not None else 0
    self.priority = priority if priority is not None else 0
    self.idle_timeout = idle_timeout if idle_timeout is not None else 0
    self.hard_timeout = hard_timeout if hard_timeout is not None else 0
    self.cookie = cookie if cookie is not None else 0
    self.packet_count = packet_count if packet_count is not None else 0
    self.byte_count = byte_count if byte_count is not None else 0
    self.actions = actions if actions is not None else []
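# Usage sketch; the enclosing class is not shown in this snippet, so the name
# FlowStatsEntry below is assumed. Every omitted field falls back to a
# zero/empty default, mirroring an OpenFlow flow-stats reply:
#
#     entry = FlowStatsEntry(table_id=1, priority=100,
#                            actions=[output_action])  # output_action: hypothetical
#     assert entry.cookie == 0 and entry.byte_count == 0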
def spide_for_poj(username):
    # Reject queries too short to identify a user.
    if len(username) <= 1:
        return match_user_failed
    data = decompress(spide(poj_url.format(username)))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    table_data = match(poj_table_pattern, data)
    if table_data is None or len(table_data) < 1:
        return match_user_failed
    td_data = match(poj_td_pattern, table_data[0])
    # Six <td> cells per search hit: the name at index i, the AC count at i + 4.
    for i in range(0, len(td_data), 6):
        cur_username = match(poj_username_pattern, td_data[i])[0]
        if cur_username.lower() == username.lower():
            return td_data[i + 4]
    return match_user_failed
def spide_for_bzoj(username):
    data = decompress(spide(bzoj_url + username))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    # BZOJ answers unknown users with a page ending in "No such User".
    if data[-13:-1] == 'No such User':
        return match_user_failed
    return match(bzoj_ac_pattern, data)[0]
def get_user_url(username):
    data = decompress(spide(zoj_url.format(username)))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    user_url = match(zoj_user_pattern, data)
    if user_url is None or len(user_url) == 0:
        return match_user_failed
    # The search result holds a relative link; prepend the ZOJ mirror host.
    return 'http://www.icpc.moe' + user_url[0]
def spide_for_fzu(username):
    data = decompress(spide(fzu_url + username))
    if data is None:
        return get_data_failed
    data = data.decode('utf-8')
    ac_number = match(fzu_ac_number_pattern, data)
    if ac_number is None or len(ac_number) < 1:
        return match_user_failed
    return ac_number[1]
def _process_line(self, line_number, line_content):
    if match(r'(^| +)#', line_content):
        # Ignore comment lines.
        return
    l = line_content.expandtabs(4)
    # Check for commands like: message( "testing")
    if search(r'\( +', l):
        self._handle_style_error(line_number, 'whitespace/parentheses', 5,
                                 'No space after "("')
    # Check for commands like: message("testing" )
    if search(r' +\)', l) and not search(r'^ +\)$', l):
        self._handle_style_error(line_number, 'whitespace/parentheses', 5,
                                 'No space before ")"')
    self._check_trailing_whitespace(line_number, l)
    self._check_no_space_cmds(line_number, l)
    self._check_one_space_cmds(line_number, l)
    self._check_indent(line_number, line_content)
parser.add_argument('--ratio', type=float, help='ratio test threshold explained by D. Lowe')

if __name__ == "__main__":
    args = parser.parse_args()
    train, feature_name_train, kp_train, des_train = loadKeyPoints(args.r)
    query, feature_name_query, kp_query, des_query = loadKeyPoints(args.i)
    assert feature_name_train == feature_name_query
    _, norm = init_feature(feature_name_query)
    ratio = args.ratio if args.ratio is not None else 0.75
    # Match both ways.
    two_sides_matches = match(norm, args.flann, des_train, des_query, ratio)
    MIN_MATCH_COUNT = 3
    matchesMask = None
    if len(two_sides_matches) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp_train[m.queryIdx].pt for m in two_sides_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp_query[m.trainIdx].pt for m in two_sides_matches]).reshape(-1, 1, 2)
        # Estimate a homography with RANSAC; the mask flags inlier matches.
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC)
        matchesMask = mask.ravel().tolist()
        logging.debug("%s : Matches Mask %d", datetime.datetime.now(),
                      len(matchesMask))  # len(matchesMask) is an assumed argument; the original call was truncated
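# The match() helper above is project-specific; below is a minimal sketch of
# the one-directional Lowe ratio test it presumably builds on (norm and the
# descriptor arrays as in the script; the helper name is made up):
import cv2 as cv

def ratio_test_match(norm, des_train, des_query, ratio=0.75):
    matcher = cv.BFMatcher(norm)
    good = []
    # Take the two nearest neighbours of each train descriptor and keep the
    # match only when the best is clearly closer than the runner-up.
    for pair in matcher.knnMatch(des_train, des_query, k=2):
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return good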