def indent_code(self, code):
    """Accepts a string of code or a list of code lines"""
    # code mostly copied from ccode
    if isinstance(code, string_types):
        code_lines = self.indent_code(code.splitlines(True))
        return "".join(code_lines)

    tab = "  "
    inc_regex = ("^function ", "^if ", "^elseif ", "^else$", "^for ")
    dec_regex = ("^end$", "^elseif ", "^else$")

    # pre-strip left-space from the code
    code = [line.lstrip(" \t") for line in code]

    increase = [int(any([search(re, line) for re in inc_regex]))
                for line in code]
    decrease = [int(any([search(re, line) for re in dec_regex]))
                for line in code]

    pretty = []
    level = 0
    for n, line in enumerate(code):
        if line == "" or line == "\n":
            pretty.append(line)
            continue
        level -= decrease[n]
        pretty.append("%s%s" % (tab * level, line))
        level += increase[n]
    return pretty
def check_date_format(date):
    """
    Checks whether a date is already in the proper datetime format;
    converts dates in ##/##/#### and related formats to datetime,
    or raises an error when it encounters an unknown format
    """
    # check if date is already in the proper format
    datetime_pattern = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$")
    # each regex and its accompanying strptime format; ambiguous patterns
    # appear twice so a second candidate format can be tried
    misc_date_formats = (
        (re.compile(r"\d{2}/\d{2}/\d{4}\+\d{2}:\d{2}T\d{2}:\d{2}:\d{2}$"), "%m/%d/%Y+%H:%MT%H:%M:%S"),
        (re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$"), "%Y-%m-%dT%H:%M:%S"),
        (re.compile(r"\d{4}/\d{2}/\d{2}$"), "%Y/%m/%d"),
        (re.compile(r"\d{4}/\d{2}/\d{2}$"), "%Y/%d/%m"),
        (re.compile(r"\d{2}/\d{2}/\d{4}\+\d{2}:\d{2}$"), "%m/%d/%Y+%H:%M"),
        (re.compile(r"\d{4}-\d{2}-\d{2}$"), "%Y-%m-%d"),
        (re.compile(r"\d{2}/\d{2}/\d{4}$"), "%d/%m/%Y"),
        (re.compile(r"\d{2}/\d{2}/\d{4}$"), "%m/%d/%Y"),
    )
    matched = re.search(datetime_pattern, date)
    if matched:
        return date
    else:
        for date_format_tuple in misc_date_formats:
            matched = re.search(date_format_tuple[0], date)
            if matched:
                try:
                    timestruct = time.strptime(date, date_format_tuple[1])
                    timedatetime = datetime.datetime.fromtimestamp(time.mktime(timestruct))
                    return timedatetime.strftime("%Y-%m-%dT%H:%M:%S")
                except ValueError:
                    continue
        else:
            raise TypeError("unknown date format given: %s" % date)
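# Usage sketch for check_date_format (an illustration, not part of the
# original module; it assumes the `re`, `time` and `datetime` imports the
# function itself relies on are present at module level).
if __name__ == "__main__":
    # an already-normalised timestamp passes through unchanged
    print check_date_format("2016-01-02T03:04:05")  # 2016-01-02T03:04:05
    # %d/%m/%Y raises ValueError for month 25, so the %m/%d/%Y
    # candidate parses instead
    print check_date_format("12/25/2016")           # 2016-12-25T00:00:00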
def loadAccountInfo(self, user, req):
    validuntil = None
    trafficleft = None
    premium = None

    html = req.load("http://uploading.com/")

    premium = re.search(self.PREMIUM_PATTERN, html) is None

    m = re.search(self.VALID_UNTIL_PATTERN, html)
    if m:
        expiredate = m.group(1).strip()
        self.logDebug("Expire date: " + expiredate)
        try:
            validuntil = time.mktime(time.strptime(expiredate, "%b %d, %Y"))
        except Exception, e:
            self.logError(e)
        else:
            if validuntil > time.mktime(time.gmtime()):
                premium = True
            else:
                premium = False
                validuntil = None
def clean_up(self):
    """ Move DQ outputs to their appropriate directory """
    try:
        data_dir = os.environ["DATA"]
        plots_dir = os.environ["PLOTS"]
        logs_dir = os.environ["LOGS"]
    except KeyError as detail:
        print "GenerateSpectrum.clean_up: error", detail, "not set"
        print " --> source analysis environment scripts before running!"
        sys.exit(1)
    for root, dirs, files in os.walk(os.getcwd()):
        for file in files:
            is_data = re.search(r".*\.root$", file)
            is_plot = re.search(r".*\.png$", file)
            hostname = socket.gethostname()
            is_log = re.search(r"^rat\." + hostname + r"\.[0-9]+\.log$", file)
            if is_data:
                try:
                    root_file = TFile(file)
                    tree = root_file.Get("T")
                    tree.ls()
                except ReferenceError as detail:
                    print "generate_spectrum.clean_up: error in TFile,", detail
                    sys.exit(1)
                file_manips.copy_file(os.path.join(root, file), data_dir)
            elif is_plot:
                file_manips.copy_file(os.path.join(root, file), plots_dir)
            elif is_log:
                file_manips.copy_file(os.path.join(root, file), logs_dir)
def resolve_resources(br, path):
    lecture = []
    b_video = []
    video = []
    pdf = []
    pptx = []
    for l in br.links():
        m_video = re.search(r'https:[\S]+download.mp4[\S]+\'', str(l))
        m_pdf = re.search(r'https*:[\S]+/([\S]+\.pdf)', str(l))
        m_pptx = re.search(r'https*:[\S]+/([\S]+\.pptx*)', str(l))
        if m_video:
            b_video.append(m_video.group().rstrip("'"))
        if m_pdf:
            pdf.append([resolve_name_with_hex(m_pdf.group(1)), m_pdf.group()])
        if m_pptx:
            pptx.append([resolve_name_with_hex(m_pptx.group(1)), m_pptx.group()])
    for l in b_video:
        br.open(l)
        tmp_l = br.geturl()
        index = tmp_l.find('?')
        tmp_l = tmp_l[:index]
        video.append(tmp_l)
        index = tmp_l.rfind('/')
        lecture.append(resolve_name_with_hex(tmp_l[index + 1:]))
    if len(lecture) == len(video):
        mp4 = zip(lecture, video)
    else:
        print 'Video names resolving error. Ignore videos...'
        mp4 = []
    return mp4, pdf, pptx
def initialize_constants():
    global __version__, __appname__, modules, functions, basenames, scripts

    src = open('src/calibre/constants.py', 'rb').read()
    nv = re.search(r'numeric_version\s+=\s+\((\d+), (\d+), (\d+)\)', src)
    __version__ = '%s.%s.%s' % (nv.group(1), nv.group(2), nv.group(3))
    __appname__ = re.search(r'__appname__\s+=\s+(u{0,1})[\'"]([^\'"]+)[\'"]',
                            src).group(2)
    epsrc = re.compile(r'entry_points = (\{.*?\})', re.DOTALL).\
        search(open('src/calibre/linux.py', 'rb').read()).group(1)
    entry_points = eval(epsrc, {'__appname__': __appname__})

    def e2b(ep):
        return re.search(r'\s*(.*?)\s*=', ep).group(1).strip()

    def e2s(ep, base='src'):
        return (base + os.path.sep +
                re.search(r'.*=\s*(.*?):', ep).group(1).replace('.', '/') +
                '.py').strip()

    def e2m(ep):
        return re.search(r'.*=\s*(.*?)\s*:', ep).group(1).strip()

    def e2f(ep):
        return ep[ep.rindex(':') + 1:].strip()

    basenames, functions, modules, scripts = {}, {}, {}, {}
    for x in ('console', 'gui'):
        y = x + '_scripts'
        basenames[x] = list(map(e2b, entry_points[y]))
        functions[x] = list(map(e2f, entry_points[y]))
        modules[x] = list(map(e2m, entry_points[y]))
        scripts[x] = list(map(e2s, entry_points[y]))
def _apache_index(self, url):
    r = requests.get(url)
    if r.status_code != 200:
        raise ValueError(url + " status:" + str(r.status_code))
    r.dirs = []
    r.files = []
    for l in r.content.split("\n"):
        # '<img src="/icons/folder.png" alt="[DIR]" /> <a href="7.0/">7.0/</a> 03-Dec-2014 19:57 -'
        # '<img src="/icons/tgz.png" alt="[ ]" /> <a href="owncloud_7.0.4-2.diff.gz">owncloud_7.0.4-2.diff.gz</a> 09-Dec-2014 16:53 9.7K <a href="owncloud_7.0.4-2.diff.gz.mirrorlist">Details</a>'
        #
        m = re.search("<a\s+href=[\"']?([^>]+?)[\"']?>([^<]+?)[\"']?</a>\s*([^<]*)", l, re.I)
        if m:
            # ('owncloud_7.0.4-2.diff.gz', 'owncloud_7.0.4-2.diff.gz', '09-Dec-2014 16:53 9.7K ')
            m1, m2, m3 = m.groups()
            if re.match("(/|\?|\w+://)", m1):
                # skip absolute urls, query strings and foreign urls
                continue
            if re.match("\.?\./?$", m1):
                # skip . and ..
                continue
            m3 = re.sub("[\s-]+$", "", m3)
            if re.search("/$", m1):
                r.dirs.append([m1, m3])
            else:
                r.files.append([m1, m3])
    return r
def create_movie_tiles_content(movies):
    '''Generates a string with the movie tile markup.'''
    # The HTML content for this section of the page
    content = ''
    for movie in movies:
        # Extract the youtube ID from the url
        youtube_id_match = re.search(
            r'(?<=v=)[^&#]+', movie.trailer_youtube_url
        )
        youtube_id_match = youtube_id_match or re.search(
            r'(?<=be/)[^&#]+', movie.trailer_youtube_url
        )
        trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match
                              else None)

        # Append the tile for the movie with its content filled in
        content += MOVIE_TILE_CONTENT.format(
            movie_id=movie.movie_id,
            movie_title=movie.title,
            poster_image_url=movie.poster_image_url,
            trailer_youtube_id=trailer_youtube_id,
            actors=create_actor_list_content(movie.actors),
            year=movie.year,
            synopsis=movie.synopsis
        )
    return content
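# A small self-check (assumed, not from the original file) showing the two
# trailer URL shapes the lookbehind patterns above cover; the video ID is a
# made-up placeholder.
import re

for url in ('https://www.youtube.com/watch?v=abc123XYZ_0&t=1s',
            'https://youtu.be/abc123XYZ_0'):
    m = re.search(r'(?<=v=)[^&#]+', url) or re.search(r'(?<=be/)[^&#]+', url)
    print m.group(0) if m else None  # abc123XYZ_0 in both cases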
def parser(content, option, outfp):
    score = 0
    for lines in re.findall(
            "=+LINUX\s+DATA\s+BELOW\s*=+\n(.*?)\n\*\s+Trademarks",
            content, re.DOTALL):
        if lines:
            line_list = lines.splitlines()
            for i in range(0, len(line_list)):
                if re.search("MEMORY\s+INDEX", line_list[i]):
                    memory_line = line_list[i]
                elif re.search("INTEGER\s+INDEX", line_list[i]):
                    int_line = line_list[i]
                elif re.search("FLOATING-POINT", line_list[i]):
                    float_line = line_list[i]
            if option == "int":
                line_list.remove(memory_line)
                line_list.remove(float_line)
                score = int_line.split(":")[1].strip()
            elif option == "float":
                line_list.remove(int_line)
                line_list.remove(memory_line)
                score = float_line.split(":")[1].strip()
            elif option == "memory":
                line_list.remove(int_line)
                line_list.remove(float_line)
                score = memory_line.split(":")[1].strip()
            for i in range(0, len(line_list)):
                outfp.write(line_list[i] + '\n')
    return score
def get_status(self):
    self.zd_up = 0
    self.zd_pid = 0
    self.zd_should_be_up = 0
    self.zd_status = None
    resp = self.send_action("status")
    if not resp:
        return resp
    m = re.search("(?m)^application=(\d+)$", resp)
    if not m:
        return resp
    self.zd_up = 1
    self.zd_pid = int(m.group(1))
    self.zd_status = resp
    m = re.search("(?m)^should_be_up=(\d+)$", resp)
    if m:
        self.zd_should_be_up = int(m.group(1))
    else:
        self.zd_should_be_up = 1
    m = re.search("(?m)^testing=(\d+)$", resp)
    if m:
        self.zd_testing = int(m.group(1))
    else:
        self.zd_testing = 0
    return resp
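# Sketch of the status reply format get_status expects (inferred from the
# patterns above, with invented values): the inline "(?m)" flag makes ^ and $
# match at each line of the multi-line response.
import re

resp = "application=4242\nshould_be_up=1\ntesting=0"
print re.search("(?m)^application=(\d+)$", resp).group(1)    # 4242
print re.search("(?m)^should_be_up=(\d+)$", resp).group(1)   # 1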
def do_changes(self, resource):
    changes = self.list_changes(resource)
    changed = False

    if "state" in changes and changes["state"][0] != changes["state"][1]:
        action = "start"
        if changes["state"][1] == "stopped":
            action = "stop"

        # start or stop the service
        result = self._io.run("/sbin/service", [resource.name, action])
        if re.search("^Failed", result[1]):
            raise Exception("Unable to %s %s: %s" % (action, resource.name, result[1]))
        changed = True

    if "enabled" in changes and changes["enabled"][0] != changes["enabled"][1]:
        action = "on"
        if changes["enabled"][1] == False:
            action = "off"

        result = self._io.run("/sbin/chkconfig", [resource.name, action])
        changed = True
        if re.search("^Failed", result[1]):
            raise Exception("Unable to %s %s: %s" % (action, resource.name, result[1]))

    return changed
def do_changes(self, resource):
    changes = self.list_changes(resource)
    changed = False

    if "state" in changes and changes["state"][0] != changes["state"][1]:
        action = "start"
        if changes["state"][1] == "stopped":
            action = "stop"

        # start or stop the service
        result = self._io.run("/usr/bin/systemctl", [action, "%s.service" % resource.name])
        if re.search("^Failed", result[1]):
            raise Exception("Unable to %s %s: %s" % (action, resource.name, result[1]))
        changed = True

    if "enabled" in changes and changes["enabled"][0] != changes["enabled"][1]:
        action = "enable"
        if changes["enabled"][1] == False:
            action = "disable"

        result = self._io.run("/usr/bin/systemctl", [action, "%s.service" % resource.name])
        changed = True
        if re.search("^Failed", result[1]):
            raise Exception("Unable to %s %s: %s" % (action, resource.name, result[1]))

    return changed
def parse_contact(raw):
    contact_details = {'name': '', 'email': ''}
    links = []
    match_mail = re.search(contact_mail_pattern, raw)
    match_url = re.search(contact_url_pattern, raw)
    match_intern = re.search(contact_intern_pattern, raw)
    if match_mail:
        if re.match(mail_address_pattern, match_mail.group('email')):
            # found an email address
            contact_details = {'name': match_mail.group('name'),
                               'email': match_mail.group('email')}
        else:
            contact_details = {'name': match_mail.group('name'), 'email': ''}
    elif match_url:
        # found a hyperlink
        links.append({'ref': 'contact', 'href': match_url.group('url')})
    elif match_intern:
        # found a link to the wiki page '/contact'
        contact_details = {'name': ' / '.join(name for name in match_intern.groupdict().values()
                                              if (name is not None)),
                           'email': ''}
    else:
        name = raw.replace("[", "").replace("]", "").replace("|", "").strip()
        if name:
            # found a name
            contact_details = {'name': name, 'email': ''}
        else:
            # found nothing
            pass
    return contact_details, links
def GenVerilog(self,fp,config):
    for bodyextension in ('_Rx.v','_Tx.v',):
        body = self.LoadCore(self.peripheralFile,bodyextension);
        if hasattr(self,'RTR') or hasattr(self,'RTRn'):
            body = re.sub(r'@RTR_BEGIN@\n','',body);
            body = re.sub(r'@RTR_END@\n','',body);
        else:
            if re.search(r'@RTR_BEGIN@',body):
                body = re.sub(r'@RTR_BEGIN@.*?@RTR_END@\n','',body,flags=re.DOTALL);
        for subpair in (
            ( r'@RTR_SIGNAL@', self.RTR if hasattr(self,'RTR') else self.RTRn if hasattr(self,'RTRn') else '', ),
            ( r'@RTR_INVERT@', '' if hasattr(self,'RTR') else '!', ),
            ( r'\bL__', 'L__@NAME@__', ),
            ( r'\bgen__', 'gen__@NAME@__', ),
            ( r'\bs__', 's__@NAME@__', ),
            ( r'@INPORT@', self.insignal, ),
            ( r'@BAUDMETHOD@', str(self.baudmethod), ),
            ( r'@SYNC@', str(self.sync), ),
            ( r'@DEGLITCH@', str(self.deglitch), ),
            ( r'@INFIFO@', str(self.inFIFO), ),
            ( r'@ENABLED@', self.CTS if hasattr(self,'CTS') else ('!%s' % self.CTSn) if hasattr(self,'CTSn') else '1\'b1', ),
            ( r'@NSTOP@', str(self.nStop), ),
            ( r'@OUTFIFO@', str(self.outFIFO), ),
            ( r'@NAME@', self.namestring, ),
        ):
            if re.search(subpair[0],body):
                body = re.sub(subpair[0],subpair[1],body);
        body = self.GenVerilogFinal(config,body);
        fp.write(body);
def get_mediaid(self):
    match = re.search(r"mediaId = '([^']+)';", self.get_urldata())
    if not match:
        match = re.search(r'media-id="([^"]+)"', self.get_urldata())
    if not match:
        match = re.search(r'screen9-mid="([^"]+)"', self.get_urldata())
    if not match:
        match = re.search(r'data-id="([^"]+)"', self.get_urldata())
    if not match:
        match = re.search(r'data-id=([^ ]+) ', self.get_urldata())
    if not match:
        match = re.search(r'data-videoid="([^"]+)"', self.get_urldata())
    if not match:
        match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)',
                          self.get_urldata())
        if match:
            data = self.http.request("get", match.group(1))
            match = re.search(r'mediaid": "([^"]+)"', data.text)
    if not match:
        match = re.search('iframe src="(//csp.screen9.com[^"]+)"', self.get_urldata())
        if match:
            url = "http:{0}".format(match.group(1))
            data = self.http.request("get", url)
            match = re.search(r"mediaid: '([^']+)'", data.text)
    if not match:
        urlp = urlparse(self.url)
        match = urlp.fragment
    return match
def parse_changelog():
    with open('CHANGES') as f:
        lineiter = iter(f)
        for line in lineiter:
            match = re.search('^Version\s+(.*)', line.strip())
            if match is None:
                continue
            version = match.group(1).strip()
            if lineiter.next().count('-') != len(line.strip()):
                fail('Invalid hyphen count below version line: %s', line.strip())
            while 1:
                released = lineiter.next().strip()
                if released:
                    break
            match = re.search(r'Released (\w+\s+\d+\w+\s+\d+)', released)
            if match is None:
                fail('Could not find release date in version %s' % version)
            datestr = parse_date(match.group(1).strip())
            return version, datestr
def loginWithCheckCode(self):
    checkcode = raw_input('Please enter the check code: ')
    self.post['TPL_checkcode'] = checkcode
    self.postData = urllib.urlencode(self.post)
    try:
        request = urllib2.Request(self.loginURL, self.postData, self.loginHeaders)
        response = self.opener.open(request)
        content = response.read().decode('gbk')
        # the page reports a wrong check code with the text "验证码错误"
        pattern = re.compile(u'\u9a8c\u8bc1\u7801\u9519\u8bef', re.S)
        result = re.search(pattern, content)
        if result:
            print u"Wrong check code entered"
            return False
        else:
            tokenPattern = re.compile('id="J_HToken" value="(.*?)"')
            tokenMatch = re.search(tokenPattern, content)
            if tokenMatch:
                print u"Check code accepted"
                print tokenMatch.group(1)
                return tokenMatch.group(1)
            else:
                print u"J_HToken not found"
                return False
    except urllib2.HTTPError, e:
        print u"Error:", e.reason
        return False
def nameQuality(name):
    name = os.path.basename(name)

    # if we have our exact text then assume we put it there
    for x in Quality.qualityStrings:
        if x == Quality.UNKNOWN:
            continue
        regex = '\W' + Quality.qualityStrings[x].replace(' ', '\W') + '\W'
        regex_match = re.search(regex, name, re.I)
        if regex_match:
            return x

    checkName = lambda list, func: func([re.search(x, name, re.I) for x in list])

    if checkName(["pdtv.xvid", "hdtv.xvid", "dsr.xvid"], any) and not checkName(["720p"], all):
        return Quality.SDTV
    elif checkName(["dvdrip.xvid", "bdrip.xvid", "dvdrip.divx", "dvdrip.ws.xvid"], any) and not checkName(["720p"], all):
        return Quality.SDDVD
    elif checkName(["720p", "hdtv", "x264"], all) or checkName(["hr.ws.pdtv.x264"], any):
        return Quality.HDTV
    elif checkName(["720p", "web.dl"], all) or checkName(["720p", "itunes", "h.?264"], all):
        return Quality.HDWEBDL
    elif checkName(["720p", "bluray", "x264"], all) or checkName(["720p", "hddvd", "x264"], all):
        return Quality.HDBLURAY
    elif checkName(["1080p", "bluray", "x264"], all) or checkName(["1080p", "hddvd", "x264"], all):
        return Quality.FULLHDBLURAY
    else:
        return Quality.UNKNOWN
def ftp_download():
    ftp_host = 'ftp.uniprot.org'
    ftp_user = '******'
    ftp_pass = ''
    ftp_path = '/pub/databases/uniprot/current_release/knowledgebase/reference_proteomes'

    ftp = FTP(ftp_host)
    ftp.login(ftp_user, ftp_pass)
    ftp.getwelcome()
    ftp.cwd(ftp_path)
    dirs = ftp.nlst()
    # print(dirs)
    p = 0

    # Navigate to the required directory and thereby download data.
    for dir in dirs:
        if re.search(species, dir):
            path = ftp_path + '/' + str(species)
            # print(path)
            ftp.cwd(path)
            types = ftp.nlst()
            for x in types:
                if not re.search('DNA.fasta.gz', x) and re.search('fasta.gz', x):
                    final = path + '/' + str(x)
                    # print(final)
                    fullfilename = os.path.join(store + str(x))
                    urllib.urlretrieve('ftp://' + ftp_host + str(final), fullfilename)
                    p += 1
                else:
                    pass
    print("Number of viruses: " + str(p))
    print(ftp.pwd())
def annotation2arg(self, arg, annotation):
    '''Convert GObject-introspection annotations to arg options'''
    if 'allow-none' in annotation:
        arg[2]['optional'] = True
    if re.search(r'\(\s*out\s*\)', annotation):
        arg[2]['out'] = True
    if re.search(r'\(\s*in\s*\)', annotation):
        arg[2]['in'] = True
    m = re.search(r'\(\s*default\s*([^ )]*)\s*\)', annotation)
    if m:
        prefix = ''
        if is_boolean(arg):
            prefix = 'b:'
        elif is_int(arg, self.binding_data):
            prefix = 'c:'
        else:
            raise Exception('should not happen: could not find type for default: '
                            + annotation)
        arg[2]['default'] = prefix + m.group(1)
        arg[2]['optional'] = True
    m = re.search(r'\(\s*element-type\s+(\w+)(?:\s+(\w+))?', annotation)
    if m:
        if m.group(2):
            arg[2]['key-type'] = \
                convert_type_from_gobject_annotation(m.group(1))
            arg[2]['value-type'] = \
                convert_type_from_gobject_annotation(m.group(2))
        else:
            arg[2]['element-type'] = \
                convert_type_from_gobject_annotation(m.group(1))
    m = re.search(r'\(\s*transfer\s+(\w+)', annotation)
    if m:
        arg[2]['transfer'] = m.group(1)
def valid_password(password):
    ''' Verify the password meets complexity requirements. '''
    return len(password) >= 8 and \
        re.search(_LOWER_ALPHA, password) and \
        re.search(_UPPER_ALPHA, password) and \
        re.search(_NUMERIC, password)
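# The _LOWER_ALPHA, _UPPER_ALPHA and _NUMERIC patterns are defined elsewhere
# in the module; a plausible minimal definition (an assumption, not the
# original source) would be:
import re

_LOWER_ALPHA = re.compile(r'[a-z]')  # at least one lowercase letter
_UPPER_ALPHA = re.compile(r'[A-Z]')  # at least one uppercase letter
_NUMERIC = re.compile(r'[0-9]')      # at least one digit

print bool(valid_password("Passw0rd"))  # True: length 8, lower, upper, digit
print bool(valid_password("password"))  # False: no uppercase or digit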
def status_recv(self):
    try:
        tmp = str(self.cxi.recv(1024), 'utf-8')
    except (socket.error, socket.timeout):
        print ("ERROR:socket got disconnected")
        return -1, "socket timeout/disconnected"
    if DEBUG:
        print ("recv got, ", tmp)
    if tmp == 0:
        return -1, "socket timeout/disconnected"
    if len(tmp) == 0:
        return -1, "did not receive anything"
    ## out of ribbon
    if re.search("^o", tmp) != None:
        return -1, "Out of ribbon"
    ## out of paper
    if re.search("^O", tmp) != None:
        return -1, "Out of paper"
    ## printing error
    if re.search("^ERROR", tmp) != None:
        return -1, "some ERROR with status check"
    ## print is done
    if re.search("^R00000", tmp) != None:
        return 1, "Success"
    return -1, tmp
def _parse_alt(self, str):
    if re.search('[\[\]]', str) is not None:
        # Paired breakend
        items = re.split('[\[\]]', str)
        remoteCoords = items[1].split(':')
        chr = remoteCoords[0]
        if chr[0] == '<':
            chr = chr[1:-1]
            withinMainAssembly = False
        else:
            withinMainAssembly = True
        pos = remoteCoords[1]
        orientation = (str[0] == '[' or str[0] == ']')
        remoteOrientation = (re.search('\[', str) is not None)
        if orientation:
            connectingSequence = items[2]
        else:
            connectingSequence = items[0]
        return _Breakend(chr, pos, orientation, remoteOrientation,
                         connectingSequence, withinMainAssembly)
    elif str[0] == '.' and len(str) > 1:
        return _SingleBreakend(True, str[1:])
    elif str[-1] == '.' and len(str) > 1:
        return _SingleBreakend(False, str[:-1])
    elif str[0] == "<" and str[-1] == ">":
        return _SV(str[1:-1])
    else:
        return _Substitution(str)
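# For reference (example ALT strings per VCF breakend notation; illustrative,
# not from the original file), _parse_alt dispatches as:
#   "G]17:198982]" -> _Breakend: paired breakend, mate on chromosome 17
#   ".A"           -> _SingleBreakend(True, "A")
#   "A."           -> _SingleBreakend(False, "A")
#   "<DEL>"        -> _SV("DEL")
#   "T"            -> _Substitution("T")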
def process_page(self, page):
    talk_page = page
    page = talk_page.toggleTalkPage()
    # find the edit where {{good article}} was added
    found_oldid = False
    oldid = None
    while not found_oldid:
        self.site.loadrevisions(page, getText=True, rvdir=False, step=10,
                                total=10, startid=oldid)
        hist = page.fullVersionHistory(total=10)  # This should fetch nothing...
        for revision in hist:
            if re.search('\{\{(good|ga) article\}\}', revision[3], re.IGNORECASE):
                oldid = revision[0]
            else:
                # current oldid is the right one
                found_oldid = True
                break
    # add the oldid in the template
    if not oldid:
        self.output('* ERROR: Could not find oldid for [[%s]]' % talk_page.title())
        return
    self.output('* Adding |oldid=%s to [[%s]]' % (oldid, talk_page.title()))
    oldtext = talk_page.get()
    search = re.search('\{\{GA\s?\|(.*?)\}\}', oldtext)
    newtext = oldtext.replace(search.group(0),
                              '{{GA|%s|oldid=%s}}' % (search.group(1), oldid))
    pywikibot.showDiff(oldtext, newtext)
    talk_page.put(newtext, 'BOT: Adding |oldid=%s to {{[[Template:GA|GA]]}}' % oldid)
def _get_snippet_bounds(self):
    init_snippet, end_snippet = self._textline_bounds()
    while not re.search(r' *>>> ', self.doc.get_text(init_snippet, end_snippet)):
        # step back past doctest continuation lines ("... ") until the
        # prompt line (">>> ") is reached
        if not re.search(r' *>>> | *\.\.\. ', self.doc.get_text(init_snippet, end_snippet)):
            return end_snippet, end_snippet
        init_snippet.backward_line()
    return init_snippet, end_snippet
def get_topic(target_branch):
    branch_name = get_branch_name(target_branch)

    branch_parts = branch_name.split("/")
    if len(branch_parts) >= 3 and branch_parts[0] == "review":
        return use_topic("Using change number %s "
                         "for the topic of the change submitted",
                         "/".join(branch_parts[2:]))

    log_output = run_command("git log HEAD^1..HEAD")
    bug_re = r'\b([Bb]ug|[Ll][Pp])\s*[:]?\s*[#]?\s*(\d+)'

    match = re.search(bug_re, log_output)
    if match is not None:
        return use_topic("Using bug number %s "
                         "for the topic of the change submitted",
                         "bug/%s" % match.group(2))

    bp_re = r'\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)'
    match = re.search(bp_re, log_output)
    if match is not None:
        return use_topic("Using blueprint number %s "
                         "for the topic of the change submitted",
                         "bp/%s" % match.group(2))

    return use_topic("Using local branch name %s "
                     "for the topic of the change submitted",
                     branch_name)
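# Hedged example of the bug-number extraction above (the commit text is
# invented for illustration):
import re

log_output = "Fix frobnicator teardown\n\nCloses-Bug: #1234567"
bug_re = r'\b([Bb]ug|[Ll][Pp])\s*[:]?\s*[#]?\s*(\d+)'
print re.search(bug_re, log_output).group(2)  # 1234567 -> topic "bug/1234567"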
def get_user_info(self, uid):
    """
    Fetch a user's basic profile information.
    :param uid: user id
    :return: dict of basic user info
    """
    user_info_url = 'http://weibo.cn/%s/info' % uid
    user_info_page = self.get_page(user_info_url)
    # the profile page labels are Chinese: 性别 (gender), 地区 (region), 生日 (birthday)
    sex_pattern = re.compile('性别:(.*?)<br/>')
    area_pattern = re.compile('地区:(.*?)<br/>')
    birth_pattern = re.compile('生日:(\d*?)-.*?<br/>')
    sex = re.search(sex_pattern, user_info_page)
    area = re.search(area_pattern, user_info_page)
    birth = re.search(birth_pattern, user_info_page)
    if sex:
        sex = sex.group(1)
    if area:
        area = area.group(1)
    if birth:
        birth = birth.group(1)
        if int(birth) != 1:  # filter out users left at Weibo's default birth year
            info = {'性别': sex, '地区': area, '年龄': 2016 - int(birth)}
            return info
    info = {'性别': sex, '地区': area, '年龄': None}
    return info
def check_filter(self, mac, network):
    if self.config["filter_type"][network["type"]] == False:
        return False

    crypts = decode_cryptset(network["cryptset"])
    if crypts == ["none"]:
        crypt = "none"
    elif "aes_ccm" in crypts or "aes_ocb" in crypts:
        crypt = "wpa2"
    elif "wpa" in crypts:
        crypt = "wpa"
    elif "wep" in crypts:
        crypt = "wep"
    else:
        crypt = "other"
    if self.config["filter_crypt"][crypt] == False:
        return False

    if self.config["filter_regexpr"]["ssid"] != "":
        if re.search(r"%s" % self.config["filter_regexpr"]["ssid"], network["ssid"]) is None:
            return False
    if self.config["filter_regexpr"]["bssid"] != "":
        if re.search(r"%s" % self.config["filter_regexpr"]["bssid"], mac) is None:
            return False

    return True
def _CheckLGTMsForPublicAPI(input_api, output_api):
    """Check LGTMs for public API changes.

    For public API files make sure there is an LGTM from the list of owners in
    PUBLIC_API_OWNERS.
    """
    results = []
    requires_owner_check = False
    for affected_file in input_api.AffectedFiles():
        affected_file_path = affected_file.LocalPath()
        file_path, file_ext = os.path.splitext(affected_file_path)
        # We only care about files that end in .h and are under the top-level
        # include dir.
        if file_ext == ".h" and "include" == file_path.split(os.path.sep)[0]:
            requires_owner_check = True

    if not requires_owner_check:
        return results

    lgtm_from_owner = False
    issue = input_api.change.issue
    if issue and input_api.rietveld:
        issue_properties = input_api.rietveld.get_issue_properties(
            issue=int(issue), messages=True)
        if re.match(REVERT_CL_SUBJECT_PREFIX, issue_properties["subject"], re.I):
            # It is a revert CL, ignore the public api owners check.
            return results

        if re.search(r"^COMMIT=false$", issue_properties["description"], re.M):
            # Ignore public api owners check for COMMIT=false CLs since they are
            # not going to be committed.
            return results

        match = re.search(r"^TBR=(.*)$", issue_properties["description"], re.M)
        if match:
            tbr_entries = match.group(1).strip().split(",")
            for owner in PUBLIC_API_OWNERS:
                if owner in tbr_entries or owner.split("@")[0] in tbr_entries:
                    # If an owner is specified in the TBR= line then ignore the
                    # public api owners check.
                    return results

        if issue_properties["owner_email"] in PUBLIC_API_OWNERS:
            # An owner created the CL that is an automatic LGTM.
            lgtm_from_owner = True

        messages = issue_properties.get("messages")
        if messages:
            for message in messages:
                if (message["sender"] in PUBLIC_API_OWNERS and
                        "lgtm" in message["text"].lower()):
                    # Found an lgtm in a message from an owner.
                    lgtm_from_owner = True
                    break

    if not lgtm_from_owner:
        results.append(
            output_api.PresubmitError(
                "Since the CL is editing public API, you must have an LGTM from "
                "one of: %s" % str(PUBLIC_API_OWNERS)))
    return results
def initialize():
    try:
        print '[!] Preparing SMB2 listener...'
        pkt = ("\x00\x00\x00\x01")
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind and listen for a connection
        sock.bind(("", 445))
        try:
            print '[!] Waiting for connection...'
            sock.listen(1)
            connection, addr = sock.accept()
        except KeyboardInterrupt:
            return
        print '[!] Connection from %s, waiting for negotiation...' % str(addr)
        while True:
            try:
                # receive on the accepted connection, not the listening socket
                npkt = connection.recv(1024)
                # we're responding to the negotiation packet
                if npkt[8] == 'r':
                    connection.send(pkt)
                    break
            except Exception, j:
                Error('Connection error [%s]' % j)
                break
        sock.close()
        print '[!] Complete, checking remote address...'
        rval = commands.getoutput('ping -c 1 -w 1 %s' % addr[0])
        up = re.search('\d.*? received', rval)
        if re.search('0', up.group(0)) is None:
            Msg('Host appears to be up')
        else:
            print '[+] Host is not responding - it is either down or rejecting our probes.'
    except Exception, e:
        # assumed generic handler for the outer try (truncated in the source)
        Error('Listener error [%s]' % e)