Example #1
1
    def get_all_names(self):
        """Return every name referenced in the Nginx configuration.

        Collects all server names/aliases from the parsed vhosts, plus
        reverse-DNS names for any public IPv4 vhost address.

        :returns: All ServerNames, ServerAliases, and reverse DNS entries for
                  virtual host addresses
        :rtype: set
        """
        names = set()

        # Compile once, up front, instead of per address.
        private_ip_re = re.compile(
            r"(^127\.0\.0\.1)|(^10\.)|(^172\.1[6-9]\.)|"
            r"(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^192\.168\.)")
        hostname_re = re.compile(
            r"^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*[a-z]+$",
            re.IGNORECASE)

        for vhost in self.parser.get_vhosts():
            names.update(vhost.names)

            for address in vhost.addrs:
                candidate = address.get_addr()
                if hostname_re.match(candidate):
                    # Already a hostname: record it directly.
                    names.add(candidate)
                elif not private_ip_re.match(candidate):
                    # Public address: attempt a reverse DNS lookup.
                    # TODO: IPv6 support
                    try:
                        socket.inet_aton(candidate)
                        names.add(socket.gethostbyaddr(candidate)[0])
                    except (socket.error, socket.herror, socket.timeout):
                        continue

        return names
Example #2
1
    def testSendMessageWithSpecifiedAddresses(self):
        """send_message() must use the explicitly passed from_addr/to_addrs
        and ignore the From:/To: headers already on the message."""
        # Make sure addresses specified in call override those in message.
        m = email.mime.text.MIMEText("A test message")
        m["From"] = "foo@bar.com"
        m["To"] = "John, Dinsdale"
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="localhost", timeout=3)
        smtp.send_message(m, from_addr="joe@example.com", to_addrs="foo@example.net")
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m["X-Peer"] = socket.gethostbyname("localhost")
        mexpect = "%s%s\n%s" % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # Header recipients must NOT be used; only the explicit to_addrs.
        for addr in ("John", "Dinsdale"):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE)
            self.assertNotRegex(debugout, to_addr)
        recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
        self.assertRegex(debugout, recip)
Example #3
1
    def testSendMessageResent(self):
        """When Resent-* headers are present, send_message() must take the
        envelope from them (Resent-From / Resent-To / Resent-Bcc) instead of
        the original From/To/CC/Bcc headers."""
        m = email.mime.text.MIMEText("A test message")
        m["From"] = "foo@bar.com"
        m["To"] = "John"
        m["CC"] = "Sally, Fred"
        m["Bcc"] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m["Resent-Date"] = "Thu, 1 Jan 1970 17:42:00 +0000"
        m["Resent-From"] = "holy@grail.net"
        m["Resent-To"] = "Martha <my_mom@great.cooker.com>, Jeff"
        m["Resent-Bcc"] = "doe@losthope.net"
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="localhost", timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # The Resent-Bcc headers are deleted before serialization.
        del m["Bcc"]
        del m["Resent-Bcc"]
        # Add the X-Peer header that DebuggingServer adds
        m["X-Peer"] = socket.gethostbyname("localhost")
        mexpect = "%s%s\n%s" % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        # Envelope sender comes from Resent-From, not From.
        sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # Recipients come from Resent-To and Resent-Bcc.
        for addr in ("my_mom@great.cooker.com", "Jeff", "doe@losthope.net"):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE)
            self.assertRegex(debugout, to_addr)
Example #4
1
 def render(self, name, value, attrs=None, choices=()):
     """Render the radio widget without <ul>/<li> wrappers, moving each
     <input> in front of its <label> text."""
     html = self.get_renderer(name, value, attrs, choices).render()
     # Strip the list markup entirely.
     html = re.compile(r"</?(ul|li)>").sub("", html)
     # "<label a><input b>text</label>" -> "<input b><label a>text</label>".
     html = re.compile(r"<label(.*?)>(<input.*?>)(.*?)</label>").sub(
         r"\2<label\1>\3</label>", html)
     return mark_safe(unicode(html))
Example #5
1
    def do_GET(self):
        """Serve a GET request.

        Path resolution:
          * ``~~`` in the path: everything after it is used verbatim;
          * ``~`` in the path: everything after it is resolved under webroot;
          * otherwise the path is resolved under the current directory.
        Directories containing INDEX_FILE serve that file.  Targets whose
        name contains ``.htm`` go through serveHTML, everything else through
        servePLAIN; on IOError a directory listing then a 404 are attempted.
        """
        try:
            if self.path.count("~~") == 1:
                end = re.search("~~", self.path).end()
                targetpath = self.path[end:]
            elif self.path.count("~") == 1:
                end = re.search("~", self.path).end()
                targetpath = webroot + self.path[end:]
            else:
                targetpath = curdir + self.path

            # Serve the directory's index file when present.  Narrowed from
            # a bare except: only the expected "not a directory / missing"
            # errors from listdir are deliberately ignored here.
            try:
                if INDEX_FILE in os.listdir(targetpath):
                    targetpath = targetpath + INDEX_FILE
            except OSError:
                pass

            # Bug fix: the original pattern ".htm" used an unescaped dot and
            # matched any character followed by "htm" (e.g. "xhtm").
            if re.search(r"\.htm", targetpath) is not None:
                self.serveHTML(targetpath)
                return
            else:
                self.servePLAIN(targetpath)
                return
        except IOError:
            try:
                self.serveDirectory(targetpath)
            except IOError:
                self.send_error(404, "File Not Found: %s" % self.path)
Example #6
1
    def _parseStatement(self, s):
        """Parse a ``name=v1,v2,...`` statement (Python 2).

        Returns (name, [values]) or None (after printing a message) on any
        syntax error.

        NOTE(review): relies on Python 2 re.split() semantics where
        zero-length matches do not split; the interleaving assumed below
        changes under Python 3.7+ — confirm before porting.
        """
        statement_re = re.compile("(.*)=(.*)")
        value_list_re = re.compile("([^,]*),?")
        if not statement_re.match(s):
            print "syntax error (statement match): %s" % repr(s)
            return
        # split() with capturing groups yields ['', name, value, ''] for a
        # well-formed statement.
        statement_split = statement_re.split(s)
        if len(statement_split) != 4:
            print "syntax error (statement split): %s" % repr(s)
            return
        (foo, name, value, bar) = statement_split
        # Splitting on "([^,]*),?" interleaves captured values with empty
        # separator strings: ['', v1, '', v2, ..., vN, ''] — hence the
        # odd-length check below.
        value_split = value_list_re.split(value)
        if len(value_split) < 2 or len(value_split) % 2 != 1:
            print "syntax error (value split): %s" % (repr(value_split))
            return
        try:
            value_array = []
            value_split.reverse()
            value_split.pop()
            # Alternately keep a value and drop the empty filler entry.
            while len(value_split) != 0:
                value_array.append(value_split.pop())
                value_split.pop()
        except:
            print traceback.print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)
            print "syntax error (value to array): %s" % (repr(value_split))
            return

        return (name, value_array)
Example #7
0
def grab_partn(f, start_delimiter, end_delimiter, buffer=""):
    """
    Read from *f* until a start/end delimited section is complete.

    Returns ``(section, remainder)``: *section* runs from the start
    delimiter through the end of the end delimiter, *remainder* is whatever
    extra data was buffered past it.  Raises EOFError if the stream ends
    before both delimiters are seen.
    """

    def scan_for(compiled):
        # Advance through the stream READ_STEP characters at a time,
        # searching each new chunk together with the previous one so a
        # delimiter split across a chunk boundary is still found.
        nonlocal buffer
        previous = buffer
        while True:
            chunk = f.read(READ_STEP)
            if len(chunk) == 0:
                raise EOFError
            buffer += chunk
            hit = re.search(compiled, previous + chunk)
            if hit:
                return (len(buffer) - len(chunk) - len(previous)) + hit.span()[0]
            previous = chunk

    start_pos = scan_for(re.compile(start_delimiter))
    buffer = buffer[start_pos:]

    # NOTE: len(end_delimiter) assumes the delimiter is a literal string,
    # not a multi-character regex (same assumption as the original).
    end_pos = scan_for(re.compile(end_delimiter)) + len(end_delimiter)

    return (buffer[:end_pos], buffer[end_pos:])
Example #8
0
def do_headers(text_in):
    """Convert ``== Title ==`` wiki-style headers to ``hN.`` notation.

    The number of leading '=' characters gives the level (``== x ==`` ->
    ``h2.``).  Headers whose '=' runs are unbalanced are reported and left
    untranslated.  Returns the translated text.
    """
    text_out = text_in

    # TODO: anchor with ^/$ (re.MULTILINE) so mid-line '=' runs can't match.
    whole_header_pattern = re.compile(r"=+ [^=\n]+ =+")
    all_headers = whole_header_pattern.finditer(text_out)

    header_text = re.compile(r"[\w\s]+")
    count_eq = re.compile(r"=+")

    for header in all_headers:
        cur_hd_txt = header_text.search(header.group())  # one text run per header
        cur_eq_list = count_eq.findall(header.group())
        eq_num1 = len(cur_eq_list[0])
        eq_num2 = len(cur_eq_list[1])
        if eq_num1 != eq_num2:
            # Bug fix: 'header' is a Match object; concatenating it with a
            # string raised TypeError.  Report the matched text instead.
            # (Also converted the Python 2 print statement to a call.)
            print("Equal signs don't match in current header: '" + header.group() + "'")
            continue  # ignore this header, don't translate
        new_hd = "h" + str(eq_num1) + "." + cur_hd_txt.group().rstrip()  # rstrip drops the trailing space
        # str.replace is used instead of span-based splicing because
        # Match.start() was observed misbehaving with this unanchored
        # pattern (see history) — this is the band-aid kept on purpose.
        text_out = text_out.replace(header.group(), new_hd)

    return text_out
Example #9
0
    def getLastUpdated(self):
        """Return the last-update timestamp formatted with its GMT offset.

        ``self.zone`` is the hour offset from GMT: negative offsets already
        carry their sign, positive ones get an explicit '+'.
        @note: there seems to be a problem with AM/PM not parsing correctly
        """
        logger.log(9, 'getLastUpdated() "%s"', self.updated)
        # Every branch below returns, so the old AM/PM strptime experiment
        # that used to follow ("this was a silly idea but the dates are very
        # american") was unreachable dead code and has been removed.
        if self.zone < 0:
            return "%s  (GMT%s)" % (self.updated, self.zone)
        elif self.zone > 0:
            return "%s  (GMT+%s)" % (self.updated, self.zone)
        else:
            return "%s  (GMT)" % (self.updated)
def playVideo(url):
    """Resolve a Discovery page URL to a playable stream for XBMC.

    First tries to scrape a direct m3u8 URL from the page; otherwise
    follows the reference_id to a SMIL service and picks the
    highest-bitrate rendition.
    """
    content = getUrl(url)
    # The page embeds "m3u8 <tabs>: '<url>'"; the literal whitespace in the
    # pattern matches the site's formatting exactly.
    match = re.compile("m3u8 			: '(.*?)'", re.DOTALL).findall(content)
    finalUrl = match[0]
    if finalUrl:
        listitem = xbmcgui.ListItem(path=finalUrl)
        xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
    else:
        # No direct stream: look up the SMIL playlist via the reference id.
        match = re.compile("reference_id 	: '(.+?)-title'", re.DOTALL).findall(content)
        content = getUrl(
            "http://static.discoverymedia.com/videos/components/hsw/" + match[0] + "-title/smil-service.smil"
        )
        match = re.compile('<meta name="httpBase".+?content="(.+?)"', re.DOTALL).findall(content)
        base = match[0]
        # Choose the rendition with the highest advertised bitrate.
        maxBitrate = 0
        match = re.compile('<video src="(.+?)" system-bitrate="(.+?)"', re.DOTALL).findall(content)
        for urlTemp, bitrateTemp in match:
            bitrate = int(bitrateTemp)
            if bitrate > maxBitrate:
                maxBitrate = bitrate
                finalUrl = urlTemp
        finalUrl = base + "/" + finalUrl + "?v=2.6.8&fp=&r=&g="
        listitem = xbmcgui.ListItem(path=finalUrl)
        xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
        # Workaround: if playback stalled at 0s after the auto-play delay,
        # toggle pause.  'autoPlay' is presumably a module-level setting in
        # seconds — TODO confirm.
        if autoPlay > 0:
            xbmc.sleep(autoPlay * 1000)
            if xbmc.Player().isPlaying() == True and int(xbmc.Player().getTime()) == 0:
                xbmc.Player().pause()
Example #11
0
def disklog():
    """Record disk usage for every /dev and /dev/mapper filesystem.

    Parses ``df -P -B 1`` output and feeds ``N:<size>:<used>`` samples into
    the per-device RRD files via rrdtool.

    NOTE(review): on Python 3, communicate() returns bytes and these str
    patterns would not match — this module appears to target Python 2;
    confirm before porting.
    """
    # Plain block devices vs. devicemapper entries need separate patterns.
    redisk = re.compile(r"^/dev/([a-z0-9\-]+) +([0-9]+) +([0-9]+) +([0-9]+)")
    remap = re.compile(r"^/dev/mapper/([a-z0-9\-]+) +([0-9]+) +([0-9]+) +([0-9]+)")

    res = subprocess.Popen(["df", "-P", "-B", "1"], stdout=subprocess.PIPE)
    if res.wait() != 0:
        return

    out = res.communicate()[0].splitlines()
    for line in out:
        # Idiom fix: 'match == None' -> 'is None'; the two alternatives are
        # folded into a single 'or' expression.
        match = redisk.match(line) or remap.match(line)
        if match is None:
            continue

        dev = match.group(1)
        size = int(match.group(2))
        used = int(match.group(3))
        data = "N:" + str(size) + ":" + str(used)

        disk_check(dev)

        print("disk: " + dev + ", " + data)
        os.system("rrdtool update rrd/disk-" + dev + ".rrd " + data)
Example #12
0
def memlog():
    # Measure memory and swap usage (the old comment said "ping rtt" — a
    # copy/paste leftover) and record them into RRD files.
    remem = re.compile("^Mem: +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+) +([0-9]+)")
    reswap = re.compile("^Swap: +([0-9]+) +([0-9]+) +([0-9]+)")

    mem_check()
    res = subprocess.Popen(["free", "-b"], stdout=subprocess.PIPE)
    if res.wait() == 0:
        lines = res.communicate()[0].splitlines()
        print lines
        out = lines[1]  # Mem:
        print out
        match = remem.match(out)
        print match
        # "used" excludes buffers and cache so it reflects application use.
        used = int(match.group(2))
        buffers = int(match.group(5))
        cached = int(match.group(6))
        used = used - buffers - cached

        # NOTE(review): assumes the old procps 'free' layout where line 3 is
        # Swap (line 2 being "-/+ buffers/cache"); newer 'free' puts Swap on
        # line 2 — confirm the target system.
        out = lines[3]  # Swap:
        match = reswap.match(out)
        swap = int(match.group(2))

        data = "N:" + str(used) + ":" + str(buffers) + ":" + str(cached)
        print ("mem: " + data)
        os.system("rrdtool update rrd/mem.rrd " + data)

        data = "N:" + str(swap)
        print ("swap: " + data)
        os.system("rrdtool update rrd/swap.rrd " + data)
Example #13
0
   def decodehtmlentities(self, string):
       """Replace HTML entities in *string* with their characters (Python 2).

       Handles named entities via htmlentitydefs, then numeric references
       (decimal ``&#NNN;`` and hex ``&#xHH;``), encoding each replacement
       to self.internalEncoding.

       NOTE(review): replacements rebind the local 'string' and nothing is
       returned, so the caller's value is unchanged — looks like a missing
       'return string'; confirm intended behavior.
       """
       try:
           # Pass 1: named entities (e.g. &amp;).
           htmlEntities = re.compile("&(#?\w+?);", re.DOTALL).findall(string)
           for entity in htmlEntities:
               if entity[0] != "#" and htmlentitydefs.entitydefs.has_key(entity):
                   string = string.replace(
                       "&" + entity + ";",
                       str(unicode(htmlentitydefs.entitydefs[entity], "iso-8859-1").encode(self.internalEncoding)),
                   )
           # Pass 2: re-scan for the numeric references that remain.
           htmlEntities = re.compile("&(#?\w+?);", re.DOTALL).findall(string)
           for entity in htmlEntities:
               if entity[0] == "#":
                   if entity[1] == "x":
                       string = string.replace(
                           "&" + entity + ";", unichr(int(entity[2:], 16)).encode(self.internalEncoding)
                       )
                   else:
                       string = string.replace(
                           "&" + entity + ";", unichr(int(entity[1:])).encode(self.internalEncoding)
                       )
       except Exception, e:
           self.trace("Error while decode HTML Entities", self.browsyTraceLevel + 5)
           traceback.print_exc()
Example #14
0
 def __init__(self, pattern, preprocess=True, wildcard=r"[\w-]"):
     """Build a Pattern from *pattern*.

     When *preprocess* is true the raw pattern is run through
     Pattern._preprocess, which substitutes capture groups built from
     *wildcard* and records the resulting back-references.
     """
     self._original_pat = pattern
     self._back_refs = {}
     # Start from an empty compiled pattern; preprocessing may replace it.
     self._pattern = re.compile("")
     if preprocess:
         processed, self._back_refs = Pattern._preprocess(pattern, "(%s+)" % wildcard)
         self._pattern = re.compile(processed)
Example #15
0
 def __copy__(self):
     """Return a shallow copy whose regex filters are freshly re-compiled
     so the copy owns its own pattern objects."""
     duplicate = type(self)(self.label, self.active, None, None, self.filter_string)
     for attr in ("filter", "byte_filter"):
         source = getattr(self, attr)
         if source is not None:
             setattr(duplicate, attr, re.compile(source.pattern, source.flags))
     return duplicate
def Episodes(url, name):
    """List episode mirror links for a show page (Python 2, XBMC plugin).

    Scrapes the episode cell matching *name*, then for each mirror builds a
    '***'-separated bundle of up to 12 subsequent mirrors (each entry is
    'url+++label') and registers it via addLink().
    """
    # try:
    link = GetContentMob(url)
    # Collapse the page onto one line so the regexes below can span rows.
    newlink = "".join(link.splitlines()).replace("\t", "")
    match = re.compile(
        '<td style="text-align:justify" class="movieepisode"><strong>' + name + "</strong>(.+?)</td>"
    ).findall(newlink)
    mirrors = re.compile("<a [^>]*href=[\"']?([^>^\"^']+)[\"']?[^>]*>(.+?)</a>").findall(match[0])

    if len(mirrors) >= 1:
        i = 1
        for mcontent in mirrors:
            vLinktemp, vLinkName = mcontent
            vLink = ""
            j = 1
            k = 1
            # Bundle this mirror plus the ones after it (max 12 per bundle):
            # entries joined with '***', url/label pairs joined with '+++'.
            for mlink in mirrors:
                vLink1, vLinkName1 = mlink
                if j >= i:
                    if i == len(mirrors) or j == len(mirrors) or k == 12:
                        vLink += viddomain + vLink1 + "+++" + vLinkName1
                    else:
                        vLink += viddomain + vLink1 + "+++" + vLinkName1 + "***"
                    if k % 12 == 0:
                        break
                    k += 1
                j += 1
            i += 1
            # addLink("tập:  " + RemoveHTML(vLinkName).strip(),mobileurl+"/"+vLink,3,'',"")
            addLink("Tập:  " + RemoveHTML(vLinkName).strip(), vLink, 3, "", "")
            print vLink
Example #17
0
 def get(self):
     """Serve the file named by the 'file' query parameter as escaped HTML.

     Angle brackets are entity-escaped, then newlines/spaces/tabs become
     <br>/&nbsp;/&#09; inside a <pre> block.  Without a 'file' parameter
     the client is redirected to /test/.
     """
     fname = self.request.get("file")  # renamed: 'file' shadowed the builtin
     self.response.out.write(self.text)
     if fname:
         if os.path.isfile(fname):
             # 'with' guarantees the handle is closed even on error
             # (the original leaked it when read/sub raised).
             with open(fname, "r") as f1:
                 tag = re.compile(r"<([^>]*?)>", re.DOTALL | re.M)
                 escaped = re.sub(tag, "&lt;\\1&gt;", f1.read())
             # The original looped per character appending "" + ch + "" — a
             # quadratic no-op; direct concatenation is equivalent.
             text = "<pre>" + escaped + "</pre>"
             # These fixed substitutions don't need regexes.
             text = text.replace("\n", "<br>")
             text = text.replace(" ", "&nbsp;")
             text = text.replace("\t", "&#09;")
             self.response.out.write(text)
         else:
             self.response.out.write("error")
     else:
         self.redirect("/test/")
Example #18
0
def urls(request):
    """REST-ish endpoint managing mock URL entries.

    GET    -> JSON list of all URLs.
    POST   -> create from {"name", "pattern"}; pattern must compile.
    DELETE -> remove by {"id"}.
    Any other method is rejected.
    """
    if request.method == "GET":
        urls_json = json.dumps(list(URL.objects.values()))
        return HttpResponse(urls_json, content_type="application/json")
    elif request.method == "POST":
        # Narrowed from a bare except: json.loads raises ValueError
        # (JSONDecodeError) on malformed bodies.
        try:
            payload = json.loads(request.body)
        except ValueError:
            return mock_server_error("Fail to unmarshal json string")
        if "name" not in payload or "pattern" not in payload:
            return mock_server_error("Lacking required field")
        # Narrowed from a bare except: only a bad pattern is expected here.
        try:
            re.compile(payload["pattern"])
        except re.error:
            return mock_server_error("invalid regular expression")
        url = URL(name=payload["name"], pattern=payload["pattern"])
        url.save()
        return mock_server_success()
    elif request.method == "DELETE":
        try:
            payload = json.loads(request.body)
        except ValueError:
            return mock_server_error("Fail to unmarshal json string")
        if "id" not in payload:
            return mock_server_error("Lacking required field:id")
        # Expected failures: URL.DoesNotExist for a missing row and
        # ValueError/TypeError for a non-numeric id.  Kept at Exception
        # (rather than bare except) since the model's exception type isn't
        # importable here.
        try:
            url = URL.objects.get(id=int(payload["id"]))
        except Exception:
            return mock_server_error("URL not found")
        url.delete()
        return mock_server_success()
    else:
        return mock_server_error("HTTP method not supported.")
Example #19
0
def listA(name, type, url, thumb):
    """Populate the XBMC directory with items from a juchang.com listing.

    Fetches the episode-list section (the "剧集列表" marker comments),
    walks any paginated 'videoListCon' sub-pages (gbk-encoded), and hands
    each page of markup to FindItems().
    """
    link = GetHttpData(url)
    # Text between the episode-list start/end marker comments.
    match1 = re.compile("<!-- 剧集列表 start -->(.+?)<!-- 剧集列表 end -->", re.DOTALL).findall(link)
    match2 = re.compile('<div class="left">(.+?)</div>', re.DOTALL).findall(match1[0])
    if match2:
        # Pagination links carry the target path as a 'videoListCon' arg.
        match = re.compile(r"'videoListCon', '(.+?)'", re.DOTALL).findall(match2[0])
        if match:
            FindItems(type, match1[0])
            for url in match:
                link = GetHttpData("http://www.juchang.com" + url)
                link = link.decode("gbk").encode("utf8")
                FindItems(type, link)
                match2 = re.compile('<a href="#" class="one"(.+?)<a class="two"', re.DOTALL).findall(link)
                if match2:
                    # Second-level pagination; note this marker variant has
                    # no space after the comma.
                    match3 = re.compile(r"'videoListCon','(.+?)'", re.DOTALL).findall(match2[0])
                    for urla in match3:
                        link = GetHttpData("http://www.juchang.com" + urla)
                        link = link.decode("gbk").encode("utf8")
                        FindItems(type, link)
        else:
            FindItems(type, match1[0])
    else:
        FindItems(type, match1[0])

    xbmcplugin.setContent(int(sys.argv[1]), "movies")
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
Example #20
0
def lexPath(d):
    """Tokenize SVG path data.

    Yields ``[token, is_command]`` pairs: command letters (MLHVCSQTAZ in
    either case) with True, numeric parameters with False.  Delimiters
    (spaces, tabs, CR/LF, commas) are skipped.  Raises Exception on
    anything unrecognizable.
    """
    offset = 0
    length = len(d)
    delim = re.compile(r"[ \t\r\n,]+")
    command = re.compile(r"[MLHVCSQTAZmlhvcsqtaz]")
    # Signed decimal with optional fraction and exponent.
    parameter = re.compile(r"(([-+]?[0-9]+(\.[0-9]*)?|[-+]?\.[0-9]+)([eE][-+]?[0-9]+)?)")
    while 1:
        m = delim.match(d, offset)
        if m:
            offset = m.end()
        if offset >= length:
            break
        m = command.match(d, offset)
        if m:
            yield [d[offset : m.end()], True]
            offset = m.end()
            continue
        m = parameter.match(d, offset)
        if m:
            yield [d[offset : m.end()], False]
            offset = m.end()
            continue
        # TODO: create new exception type for invalid path data.
        # Fixed: Python 2 'raise Exception, "..."' statement replaced with
        # the callable form, valid on both Python 2 and 3.
        raise Exception("Invalid path data!")
Example #21
0
def scrap_results(contents, counters):
    """Extract article metadata from an article page and its counters page.

    Returns a dict with Title, Date, Time, Author, Comments_count and
    Visits_count keys.
    """
    Features = {}
    article = bs(contents)
    counter_page = bs(counters)

    # Features of the article.

    # Summary = soup.p.i.text #Summary of the article
    # The title carries a fixed 13-character site suffix, which we strip.
    Features["Title"] = article.title.text[: (len(article.title.text) - 13)]
    details = article.find_all(class_="gh_inarticledetails")
    Features["Date"] = details[0].text
    Features["Time"] = details[1].text[:5]
    Features["Author"] = article.find_all(class_="gh_tab_articledetails_author")[0].text
    # Content = soup.find_all(class_ = "gh_articlecontent")[0].text[73:]
    # Related_links_list =[ip_ad+rel.get('href') for rel in soup.find_all(class_ = "gh_news_hometitle")]

    # Views/comments are summed across every matching counter link; the
    # numeric part follows a fixed-width label prefix in the link text.
    visits = 0
    for anchor in counter_page.body.find_all("a", style=re.compile("views")):
        visits = visits + int(anchor.text[9:])
    comments = 0
    for anchor in counter_page.body.find_all("a", style=re.compile("comment")):
        comments = comments + int(anchor.text[10:])

    Features["Comments_count"] = comments
    Features["Visits_count"] = visits
    return Features
Example #22
0
 def parse_file(self, file, sections=[]):
     """Parse an INI-style config file into self (via __setitem__).

     Only keys inside one of *sections* are stored (all sections when the
     list is empty).  A non-list *sections* value is wrapped in a list.
     Quoted values are unquoted; secret-ish keys are redacted in the
     debug log.  (The default list is never mutated, so the shared
     mutable-default pitfall does not apply here.)
     """
     debug("ConfigParser: Reading file '%s'" % file)
     # Idiom fix: isinstance() instead of comparing type() objects.
     if not isinstance(sections, list):
         sections = [sections]
     in_our_section = True
     r_comment = re.compile(r"^\s*#.*")
     r_empty = re.compile(r"^\s*$")
     r_section = re.compile(r"^\[([^\]]+)\]")
     r_data = re.compile(r"^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
     r_quotes = re.compile(r'^"(.*)"\s*$')
     # 'with' closes the handle even if parsing raises (it leaked before).
     with open(file, "r") as f:
         for line in f:
             if r_comment.match(line) or r_empty.match(line):
                 continue
             is_section = r_section.match(line)
             if is_section:
                 section = is_section.groups()[0]
                 in_our_section = (section in sections) or (len(sections) == 0)
                 continue
             is_data = r_data.match(line)
             if is_data and in_our_section:
                 data = is_data.groupdict()
                 if r_quotes.match(data["value"]):
                     data["value"] = data["value"][1:-1]
                 self.__setitem__(data["key"], data["value"])
                 if data["key"] in ("access_key", "secret_key", "gpg_passphrase"):
                     # Show only the first two and last characters of secrets.
                     print_value = (data["value"][:2] + "...%d_chars..." + data["value"][-1:]) % (len(data["value"]) - 3)
                 else:
                     print_value = data["value"]
                 debug("ConfigParser: %s->%s" % (data["key"], print_value))
                 continue
             warning("Ignoring invalid line in '%s': %s" % (file, line))
Example #23
0
def make_key_regex_filter(exclude_res, force_include_res=None):
    """Make a meta data filter using regular expressions.

    Parameters
    ----------
    exclude_res : sequence
        Regular expression strings.  Meta data whose key matches any of
        them is excluded, unless the key also matches one of the
        `force_include_res`.
    force_include_res : sequence
        Regular expression strings.  Meta data whose key matches any of
        them is always included.

    Returns
    -------
    A callable which can be passed to `DicomStack` as the `meta_filter`.
    """
    def _union(patterns):
        # One alternation regex matching any of the given patterns.
        return re.compile("|".join("(?:" + p + ")" for p in patterns))

    exclude_re = _union(exclude_res)
    include_re = _union(force_include_res) if force_include_res else None

    def key_regex_filter(key, value):
        return exclude_re.search(key) and not (include_re and include_re.search(key))

    return key_regex_filter
Example #24
0
def parse_ldd_output(output):
    """Parse the output of a run of 'ldd' on a binary into a dict of
    {library_path: load_address}.

    Handles both the Linux format ("lib => /path (0xaddr)") and the
    OpenBSD table format.  Lines without a recognizable library entry
    (e.g. the vdso) are skipped.

    Arguments:
      output(str): The output to parse
    """
    expr_linux = re.compile(r"\s(?P<lib>\S?/\S+)\s+\((?P<addr>0x.+)\)")
    expr_openbsd = re.compile(r"^\s+(?P<addr>[0-9a-f]+)\s+[0-9a-f]+\s+\S+\s+[01]\s+[0-9]+\s+[0-9]+\s+(?P<lib>\S+)$")

    libs = {}
    for line in output.split("\n"):
        hit = expr_linux.search(line) or expr_openbsd.search(line)
        if hit:
            # Addresses are hex strings in both formats.
            libs[hit.group("lib")] = int(hit.group("addr"), 16)
    return libs
Example #25
0
    def testSendMessageWithAddresses(self):
        """send_message() with no explicit addresses must take the envelope
        from the message headers: From as sender, To/CC/Bcc as recipients,
        transmitting everything except the Bcc header itself."""
        m = email.mime.text.MIMEText("A test message")
        m["From"] = "foo@bar.com"
        m["To"] = "John"
        m["CC"] = "Sally, Fred"
        m["Bcc"] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="localhost", timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        # make sure the Bcc header is still in the message.
        self.assertEqual(m["Bcc"], 'John Root <root@localhost>, "Dinsdale" ' "<warped@silly.walks.com>")

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m["X-Peer"] = socket.gethostbyname("localhost")
        # The Bcc header should not be transmitted.
        del m["Bcc"]
        mexpect = "%s%s\n%s" % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # All To/CC/Bcc addresses must appear in the envelope recipients.
        for addr in ("John", "Sally", "Fred", "root@localhost", "warped@silly.walks.com"):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE)
            self.assertRegex(debugout, to_addr)
Example #26
0
 def compile_regexps(self):
     """Precompile the regexes used repeatedly while reflowing text.

     Fix: patterns that relied on backslash escapes surviving a non-raw
     string ("^\\s*$" etc.) now use raw strings, avoiding the invalid-
     escape DeprecationWarning; the pattern values are unchanged.
     """
     # A line containing nothing but whitespace.
     self.blank_matcher = re.compile(r"^\s*$")
     # our unwrap regexp looks for a line with no meaningful characters, or
     # a line that starts in ALLCAPS or a line that is only space.  (we use
     # this with .split() to break text up into paragraph breaks.)
     # Kept non-raw on purpose: "\n" must be literal newline characters.
     self.unwrap_matcher = re.compile("\n\W*\n")
     # Whitespace immediately before an ALLCAPS "HEADER:" introducer.
     self.find_header_breaks_matcher = re.compile(r"\s+(?=[A-Z][A-Z][A-Z]+:.*)")
Example #27
0
    def testSendMessageWithMultipleFrom(self):
        """When From: holds multiple addresses, send_message() must use the
        single Sender: header as the envelope sender instead."""
        # Sender overrides To
        m = email.mime.text.MIMEText("A test message")
        m["From"] = "Bernard, Bianca"
        m["Sender"] = "the_rescuers@Rescue-Aid-Society.com"
        m["To"] = "John, Dinsdale"
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="localhost", timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m["X-Peer"] = socket.gethostbyname("localhost")
        mexpect = "%s%s\n%s" % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        # Envelope sender must be the Sender: header, not the From: list.
        sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ("John", "Dinsdale"):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr), re.MULTILINE)
            self.assertRegex(debugout, to_addr)
Example #28
0
def formatBugLinks(value):
    """Escape *value* and hyperlink bare URLs plus bug/issue/topic
    references.

    URLs become anchors first; then "bug 123" / "issue 123" / "topic 123"
    (optionally prefixed with WebKit/Chrome/Chromium) are linked to the
    matching tracker.
    """
    def addLink(match):
        # Route the reference to the right tracker based on the optional
        # application prefix and the reference type.
        app = match.group(1)
        if app is not None:
            app = app.lower()
        kind = match.group(2).lower()
        number = int(match.group(3))
        if kind == "topic":
            link = "https://adblockplus.org/forum/viewtopic.php?t=%i" % number
        elif app is None and kind == "issue":
            link = "https://issues.adblockplus.org/ticket/%i" % number
        elif app == "webkit":
            link = "https://bugs.webkit.org/show_bug.cgi?id=%i" % number
        elif app is not None:
            # Any remaining prefixed reference (Chrome/Chromium).
            link = "http://code.google.com/p/chromium/issues/detail?id=%i" % number
        else:
            link = "https://bugzilla.mozilla.org/show_bug.cgi?id=%i" % number
        return '<a href="%s">%s</a>' % (link, match.group(0))

    url_re = re.compile(r'(https?://\S+?)([.,:;!?"\']?(?:\s|$))', re.I | re.U)
    ref_re = re.compile(r"(?:\b(WebKit|Chrome|Chromium)\s+)?\b(bug|issue|topic)\s+(\d+)", re.I | re.U)
    escaped = unicode(Markup.escape(value))
    escaped = url_re.sub(r'<a href="\1">\1</a>\2', escaped)
    escaped = ref_re.sub(addLink, escaped)
    return Markup(escaped)
Example #29
0
    def __parse_positions(self, var):
        """Extract position rows from the scraped javascript list literal.

        Each "[...]" chunk yields one dict with pid/date/stock/percentage/
        shares/notes keys.  For the list user, percentage/shares are the
        literal string "NULL"; otherwise they are parsed from the
        tag-separated comment fields, defaulting to 0/0/"" when absent or
        malformed.
        """
        listPattern = re.compile(r"\[[^\[]+?\]")
        tagPattern = re.compile(r"<.+?>")
        betweenTagPattern = re.compile(r">(.+?)<")
        numberPattern = re.compile(r"-?\d+\.?\d*")
        stringPattern = re.compile(r'".*?[^\\]"')

        positions = []
        columns = ("pid", "date", "stock", "percentage", "shares", "notes")
        for text in listPattern.findall(var):
            data = stringPattern.findall(text[1:-1])
            stock = betweenTagPattern.findall(data[0])[0]
            if self.user == list_user:
                percentage = shares = "NULL"
                notes = tagPattern.sub(" ", data[-1][1:-1])
            else:
                comments = tagPattern.split(data[-1][1:-1])
                # Narrowed from bare excepts: a missing field raises
                # IndexError and a non-numeric one ValueError; anything
                # else should surface instead of being swallowed.
                try:
                    percentage = float(numberPattern.findall(comments[0])[0])
                except (IndexError, ValueError):
                    percentage = 0
                try:
                    shares = float(numberPattern.findall(comments[1])[0])
                except (IndexError, ValueError):
                    shares = 0
                try:
                    notes = comments[2]
                except IndexError:
                    notes = ""
            positions.append(
                dict(zip(columns, (self.id, self.now, stock, percentage, shares, notes.encode("ascii", "ignore"))))
            )
        return positions
Example #30
0
    def propose_definition(self):
        """Derive a (method_name, regex_sentence) pair from the original
        step sentence (Python 2).

        Quoted chunks ("..." or '...') become capture groups named group1,
        group2, ... both in the sentence regex and in the generated method
        signature.
        """
        sentence = unicode(self.original_sentence)
        method_name = sentence

        groups = [('"', re.compile(r'("[^"]+")')), ("'", re.compile(r"('[^']+')"))]  # double quotes  # single quotes

        attribute_names = []
        for char, group in groups:
            match_groups = group.search(self.original_sentence)

            if match_groups:

                for index, match in enumerate(group.findall(sentence)):
                    if char == "'":
                        # The quote character itself is spliced into a
                        # regex, so it must be escaped.
                        char = re.escape(char)

                    sentence = sentence.replace(match, u"%s(.*)%s" % (char, char))
                    group_name = u"group%d" % (index + 1)
                    method_name = method_name.replace(match, group_name)
                    attribute_names.append(group_name)

        # ASCII-fold accented characters so the name is a valid identifier.
        method_name = unicodedata.normalize("NFKD", method_name).encode("ascii", "ignore")
        method_name = "%s(step%s)" % (
            "_".join(re.findall("\w+", method_name)).lower(),
            attribute_names and (", %s" % ", ".join(attribute_names)) or "",
        )

        return method_name, sentence