Example #1
from pattern.web import Wikia

def create_poem(inputwoord):
    w = Wikia(domain=inputwoord)
    wordlist = []

    print "start"
    # Keep collecting words from article titles until we have at least ten.
    while len(wordlist) < 10:
        # Look at the first few titles in the wiki's article index.
        for i, title in enumerate(w.index(start='a', throttle=1.0, cached=True)):
            if i >= 3:
                break
            article = w.search(title)
            if article is None:
                continue
            for word in article.title.split():
                # Keep only lowercase, purely alphabetic words.
                if not word[0].isupper() and word.isalpha():
                    wordlist.append(word)

    for word in wordlist:
        print word

    return wordlist
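
A minimal usage sketch for the function above, assuming the "monkeyisland" wiki from the next example is reachable:

if __name__ == "__main__":
    # Prints roughly ten lowercase words scraped from article titles on the wiki.
    create_poem("monkeyisland")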
Example #2
import sys

from pattern.web import Wikia

# This example retrieves articles from Wikia (http://www.wikia.com).
# Wikia is a collection of thousands of wikis based on MediaWiki.
# Wikipedia is based on MediaWiki too.
# Wikia queries request the article HTML source from the server. This can be slow.

domain = "monkeyisland"  # "Look behind you, a three-headed monkey!"

# Alternatively, you can call this script from the commandline
# and specify another domain: python 09-wikia.py "Bieberpedia".
if len(sys.argv) > 1:
    domain = sys.argv[1]

w = Wikia(domain, language="en")

# Like Wikipedia, we can search for articles by title with Wikia.search():
print(w.search("Three Headed Monkey"))

# However, we may not know exactly which articles exist;
# "three-headed monkey", for example, does not redirect to the above article.

# We can iterate through all articles with the Wikia.articles() method
# (note that Wikipedia also has a Wikipedia.articles() method).
# The "count" parameter sets the number of article titles to retrieve per query.
# Retrieving the full article for each article takes another query. This can be slow.
i = 0
for article in w.articles(count=2, cached=True):
    print("")
    print(article.title)
    i += 1
    if i >= 3:  # Stop after a few articles; walking the whole wiki is slow.
        break
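
The comments above note that fetching each article body takes another query; here is a minimal sketch of that step, assuming WikiaArticle exposes plaintext() like the other MediaWiki article classes in pattern.web:

from pattern.web import Wikia

w = Wikia("monkeyisland", language="en")
article = w.search("Three Headed Monkey")
if article is not None:
    # This second request downloads the article's HTML; plaintext() strips the markup.
    print(article.title)
    print(article.plaintext()[:200])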
Example #3
# -*- coding: utf-8 -*-
import os, sys
sys.path.insert(0, os.path.join("..", ".."))

from pattern.web import Wikia, WikiaArticleSet, URLTimeout

# This example retrieves articles from a Wikia wiki (http://www.wikia.com).
# A query requests the article's HTML source from the server, which can be quite slow.
# It is a good idea to cache results locally,
# and to set a high timeout when calling Wikia.search().

domain = 'runescape'  # popular wiki
if len(sys.argv) > 1:
    domain = sys.argv[1]

engine = Wikia(language="en", domain=domain)

article_set = WikiaArticleSet(engine, iterationLimit=200)

counter = 0
try:
    # Iterate over the wiki's articles, printing each title as it is retrieved.
    for page in article_set:
        print counter, page.title
        counter += 1
except URLTimeout:
    print "Timeout error."