Example #1
    def command(self, connection, event, extra, dbconn):
        url = "http://api.urbandictionary.com/v0/define?%s"

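        # Map the 1-based result number onto the API's 10-entry pages
        # (Python 2 integer division).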
        index = (self.pargs.nr or 1) - 1
        page = index / 10 + 1
        index %= 10
        term = " ".join(self.pargs.search_term)

        data = {"page": page, "term": plugins.to_utf8(term)}

        prefix = "\x0303[Urban Dictionary]\x03 "
        req = urllib2.Request(url % urllib.urlencode(data))

        try:
            all_entries = json.load(urllib2.urlopen(req))["list"]
            count = 10 * (page - 1) + len(all_entries)
            entry = all_entries[index]

            txt = "%s (%d/%d)\n" % (term, index + 1, count)
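            # Urban Dictionary wraps cross-references in [brackets]; render
            # them as IRC bold (\x02...\x0f).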
            definition = "\n".join(entry["definition"].splitlines())
            definition = re.sub(r'\[([^\[]*)\]', "\x02\\1\x0f", definition)
            txt += plugins.shorten(definition, 300)

            if "example" in entry:
                example = "\n".join(entry["example"].splitlines())
                example = re.sub(r'\[([^\[]*)\]', "\x02\\1\x0f", example)
                txt += "\n\x02Example:\x0f " + plugins.shorten(example, 300)
        except Exception as e:
            print e
            if term == "dloser":
                return prefix + "The unstabliest D-System ever!"
            return prefix + "Definition not available."

        return prefix + txt
Example #2
def stats(user):
    txt = "\x0303[WeChall]\x03 "

    try:
        tree = lxml.html.parse(urllib2.urlopen(url % plugins.to_utf8(user)))
        page = urllib2.urlopen(url2 % plugins.to_utf8(user)).read()
    except Exception as e:
        return txt + "Network error."

    # ugly wechall parsing, thx a lot gizmore! ;PP
    try:
        real_user = tree.xpath("//div[@id='page']//th[text()='Username']/../td")[0].text_content()
        challs_solved = int(tree.xpath("count(//table[@id='wc_profile_challenges']/tr//a[@class='wc_chall_solved_1'])"))
        challs_total = int(tree.xpath("count(//table[@id='wc_profile_challenges']/tr)"))
        users_total = int(tree.xpath("//div[@id='wc_sidebar']//div[@class='wc_side_content']//div/a[@href='/users']")[0].text_content().split()[0])
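        # Take the last number found on the raw ranking page as the user's rank.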
        rank = int(re.findall(r'\d+', page)[-1])
    except Exception:
        return txt + "The requested user was not found, you can register at https://www.wechall.net"

    txt += "%s solved %d (of %d) challenges and is on rank %s (of %d)." % (real_user, challs_solved, challs_total, rank, users_total)
    return txt
Example #3
    def execute(self):
        if not self.args:
            return

        headers = { 'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:14.0) Gecko/20100101 Firefox/14.0.1' }
        data = { 'sl' : 'auto', 'tl' : 'en', 'hl' : 'en', 'ie' : 'UTF-8', 'q' : plugins.to_utf8(self.args) }
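        # sl=auto lets Google detect the source language, tl=en targets
        # English; supplying a data payload makes urllib2 issue a POST.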
        req = urllib2.Request("https://translate.google.com/", urllib.urlencode(data), headers)
        tree = lxml.html.parse(urllib2.urlopen(req))

        trans = tree.xpath("//span[@id='result_box']")
        if trans:
            txt = trans[0].text_content().strip()
            return "\x0303[Translation]\x03 %s" % txt
Example #4
    def execute(self):
        try:
            v = shlex.split(plugins.to_utf8(self.args or ""))
        except Exception as e:
            return str(e)

        if len(v) != 2:
            return self.help(self.cmd)

        term1, term2 = map(plugins.from_utf8, v)
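        # Look up the Google result count for each term and render the ratio
        # between them as a text bar.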
        hits1 = google_search(term1)[0]
        hits2 = google_search(term2)[0]
        h1 = locale.format("%d", hits1, 1)
        h2 = locale.format("%d", hits2, 1)

        return "%s %s %s %s %s" % (term1, h1, create_bar(hits1, hits2, 21), h2, term2)
Example #5
    def execute(self, connection, event, extra, dbconn):
        try:
            pargs = self.parser.parse_args(extra["args"])
            if self.parser.help_requested:
                return self.parser.format_help().strip()
        except Exception as e:
            return u"Error: %s" % str(e)

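        # Ask the MediaWiki search API for the single best-matching page title.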
        data = {
            "action": "query",
            "list": "search",
            "srsearch": plugins.to_utf8(u" ".join(pargs.term)),
            "srlimit": 1,
            "srprop": "",
            "format": "json",
            "continue": ""
        }

        prefix = "\x0303[Wikipedia]\x03 "
        req = urllib2.Request(url % urllib.urlencode(data))

        try:
            title = json.load(
                urllib2.urlopen(req))["query"]["search"][0]["title"]
        except Exception:
            return prefix + "Search didn't find anything."

        pageurl = "https://en.wikipedia.org/wiki/%s" % wikify(title)

        tree = lxml.html.parse(urllib2.urlopen(pageurl))
        title = get_text(tree, "//h1[@id='firstHeading']")
        content = get_text(tree, "//div[@id='mw-content-text']/div/p")

        if not title or not content:
            return prefix + "Something went wrong."

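        # On a disambiguation page, append the titles of the listed articles.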
        if tree.xpath(
                "//div[@id='mw-content-text']//table[@id='disambigbox']"):
            content += " " + ", ".join(
                tree.xpath("//div[@id='mw-content-text']/ul/li//a[1]/@title"))

        txt = "%s (%s)\n%s" % (title, pageurl, plugins.shorten(content, 300))
        return prefix + txt
Example #6
    def execute(self):
        self.parser.set_defaults(user=self.nick, site=self.target[1:])

        try:
            pargs = self.parser.parse_args(shlex.split(plugins.to_utf8(self.args or "")))
            if self.parser.help_requested:
                return self.parser.format_help().strip()
            user = plugins.from_utf8(pargs.user)
            site = plugins.from_utf8(pargs.site).lower()
        except plugins.ArgumentParserError as e:
            return "Error: %s" % str(e)
        except (SystemExit, NameError, ValueError):
            return self.help(self.cmd)

        if site not in sitemap:
            return "Unknown site: %s" % site

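        # Import the per-site stats module on demand and cache it in globals().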
        module = importlib.import_module("." + sitemap[site], path)
        globals()[module.__name__] = module
        return module.stats(user)
Example #7
def stats(user):
    url = "http://sabrefilms.co.uk/revolutionelite/w3ch4ll/userscore.php?username=%s"
    txt = "\x0303[Revolution Elite]\x03 "

    try:
        page = urllib2.urlopen(url % (plugins.to_utf8(user))).read()
    except Exception as e:
        return txt + "Network error."
        # raise e

    if page == "0":
        return txt + "The requested user was not found. You can register at http://revolutionelite.co.uk"

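    # userscore.php answers with a single colon-separated record of seven fields.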
    match = page.split(":")
    if len(match) != 7:
        return txt + "Unexpected format in reply. Try ?blame."

    _, rank, pts, ptsmax, solved, solvedmax, usercount = match
    txt += "%s solved %s (of %s) challenges and is on rank %s (of %s)." % (user, solved, solvedmax, rank, usercount)
    return txt
Example #8
    def execute(self):
        try:
            pargs = self.parser.parse_args(shlex.split(plugins.to_utf8(self.args or "")))
            if self.parser.help_requested:
                return self.parser.format_help().strip()
        except plugins.ArgumentParserError as e:
            return "error: %s" % str(e)
        except (SystemExit, NameError, ValueError):
            return self.help(self.cmd)

        url = "http://api.urbandictionary.com/v0/define?%s"

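        # Map the 1-based result number onto the API's 10-entry pages
        # (Python 2 integer division).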
        index = (pargs.nr or 1) - 1
        page = index / 10 + 1
        index %= 10
        term = " ".join(pargs.search_term)

        data = {
            "page" : page,
            "term" : term
        }

        prefix = "\x0303[Urban Dictionary]\x03 "
        req = urllib2.Request(url % urllib.urlencode(data))

        try:
            all_entries = json.load(urllib2.urlopen(req))["list"]
            count = 10 * (page - 1) + len(all_entries)
            entry = all_entries[index]

            txt = "%s (%d/%d)\n" % (term, index + 1, count)
            definition = "\n".join(entry["definition"].splitlines())
            txt += plugins.shorten(definition, 300)

            if "example" in entry:
                example = "\n".join(entry["example"].splitlines())
                txt += "\n\x02Example:\x0f " + plugins.shorten(example, 300)
        except Exception:
            return prefix + "Definition not available."

        return prefix + txt
Example #9
    def execute(self, connection, event, extra, dbconn):
        self.parser.set_defaults(user_or_rank=event.source.nick)
        self.parser.set_defaults(
            site=event.target[1:] if irc.client.is_channel(event.target)
            else event.target)

        try:
            pargs = self.parser.parse_args(extra["args"])
            if self.parser.help_requested:
                return self.parser.format_help().strip()
            user, rank = None, None
            if pargs.numeric:
                rank = int(pargs.user_or_rank)
            else:
                user = pargs.user_or_rank
            site = pargs.site.lower()
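            # "global" is a Python keyword, so fetch it via vars() instead of
            # attribute access on the namespace.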
            glob = vars(pargs)["global"]
        except Exception as e:
            return u"Error: %s" % unicode(e)

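        # In global mode, query WeChall's wechall.php endpoint directly
        # instead of delegating to a per-site module.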
        if glob:
            wcurl = "https://www.wechall.net/wechall.php?%s"
            username = str(rank) if rank else user
            query = urllib.urlencode({"username": plugins.to_utf8(username)})
            res = plugins.from_utf8(urllib2.urlopen(wcurl % query).read())
            return "\x0303[WeChall Global]\x03 " + res

        if site not in sitemap:
            return u"Unknown site: %s" % site

        try:
            module = importlib.import_module("." + sitemap[site], path)
            globals()[module.__name__] = module
            site = module.Site()
            site.settings = self.settings
        except Exception as e:
            print e
            return u"SiteImportError: %s" % site

        return self.stats(site, user, rank)
Example #10
    def execute(self):
        if not self.args:
            return

        data = {
            "action" : "query",
            "list" : "search",
            "srsearch" : plugins.to_utf8(self.args),
            "srlimit" : 1,
            "srprop" : "",
            "format" : "json",
            "continue" : ""
        }

        prefix = "\x0303[Wikipedia]\x03 "
        req = urllib2.Request(url % urllib.urlencode(data))

        try:
            title = json.load(urllib2.urlopen(req))["query"]["search"][0]["title"]
        except Exception:
            return prefix + "Search didn't find anything."

        pageurl = "https://en.wikipedia.org/wiki/%s" % wikify(title)

        tree = lxml.html.parse(urllib2.urlopen(pageurl))
        title = get_text(tree, "//h1[@id='firstHeading']")
        content = get_text(tree, "//div[@id='mw-content-text']/p")

        if not title or not content:
            return prefix + "Something went wrong."

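        # On a disambiguation page, append the titles of the listed articles.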
        if tree.xpath("//div[@id='mw-content-text']//table[@id='disambigbox']"):
            content += " " + ", ".join(tree.xpath("//div[@id='mw-content-text']/ul/li//a[1]/@title"))

        txt = "%s (%s)\n%s" % (title, pageurl, plugins.shorten(content, 300))
        return prefix + txt