Esempio n. 1
0
async def search(message):
    """Handle a search command: look the query up on Wikipedia (the default)
    or the configured wikia, and reply with a rich embed, or with an error
    message when nothing is found.
    """
    query = message.ai.get_parameter("search_query")
    # Reject a missing query up front.  (Previously this was checked only on
    # the wikia branch, so the Wikipedia branch could run with query=None.)
    if query is None:
        await message.reply("Sorry, I couldn't find a search query.", expire_time=5)
        return
    wiki = message.config["wiki"]
    if wiki is None or wiki.lower() == "wikipedia":
        try:
            page = wikipedia.page(query)
            summary = wikipedia.summary(query, sentences=3)
            embed = Embed(title=page.title, url=page.url, description=summary, timestamp=datetime.utcnow())
            try:
                embed.set_thumbnail(url=page.images[0])
            except (IndexError, AttributeError):
                pass  # page has no usable image; the embed works without one
            suggestion = wikipedia.random()
            embed.set_footer(text="Wikipedia | Try asking \"What is {}?\"".format(suggestion))
            await message.reply(embed=embed)
        except (ValueError, wikipedia.WikipediaException):
            await message.reply("Sorry, I have no information for your search query `{}`.".format(query))
    else:
        try:
            results = wikia.search(wiki, query)
            page = wikia.page(wiki, results[0])
            # Wikia URLs may contain spaces; underscores keep the link valid.
            url = page.url.replace(" ", "_")
            embed = Embed(title=page.title, url=url, description=page.summary, timestamp=datetime.utcnow())
            try:
                embed.set_thumbnail(url=page.images[0])
            except (IndexError, AttributeError):
                pass  # no thumbnail available
            embed.set_footer(text="{} wikia".format(wiki))
            await message.reply(embed=embed)
        except (ValueError, wikia.wikia.WikiaError):
            await message.reply("Sorry, I have no information for your search query `{}`.".format(query))
Esempio n. 2
0
def retrieve_character_real_name(sub_wikia, movie_character_name):
    """Retrieve the complete name of the movie character.

    The character's names in the movie script are often incomplete or
    shortened (for example, "Samwise Gamgee" appears as "Sam" in "The Lord
    of the Rings") so this function uses the movie's sub-wikia to detect
    the character's real name.

    The script will query the wikia search for the short name (for example,
    "Sam") and it will accept anything that shows up as the first result as
    the correct real name.

    It is likely that the movie script contains generic characters which do
    not properly exist in the wikia. For example, in "The Lord of the Rings:
    The Fellowship of the Ring":
        - Character name: "ORC OVERSEER"
        - Wikia search result: "List of unnamed original characters of the
        books and films"
    In cases in which the wikia search result returns "List of", the function
    will return None as the character's real name.

    Sometimes the search result will return something like this:
        - Character name: "HALDIR"
        - Wikia search result: "Haldir (disambiguation)"
    This might happen when there are multiple characters with the same name in
    the movie lore. The function will assume that the less important characters
    with the same name are irrelevant and it will automatically strip the
    substring "(disambiguation)".

    Args:
        sub_wikia (String): The sub-wikia to be queried
        movie_character_name (String): The character's name as it appears
            in the movie script (possibly shortened)

    Returns:
        String: The character's real name, or None when the search result
        is black-listed
    """

    logger = logging.getLogger(__name__)

    # Results containing any of these substrings are index pages,
    # not character articles, and are rejected.
    black_list = ['List of']

    # Accept the first wikia search hit as the canonical name.
    real_name = wikia.search(sub_wikia, movie_character_name)[0]

    logger.info('Resolved ' + movie_character_name + ' to ' + real_name)

    if __builtin__.any(x in real_name for x in black_list):
        logger.info('Rejecting possible invalid character.' + ' Name: ' +
                    movie_character_name + ' ; Real name: ' + real_name)
        real_name = None
    else:
        # Removing any "(disambiguation)" sub-strings
        real_name = real_name.split('(')[0].strip(' ')

        # Remove any special accents from the string.
        # FIX: previously this ran unconditionally, so a black-listed result
        # became unidecode(unicode(None)) == the string 'None' instead of the
        # documented None return value.
        # NOTE: `unicode` / `__builtin__` make this Python-2-only code.
        real_name = unidecode.unidecode(unicode(real_name))

    return real_name
Esempio n. 3
0
def getAnyWikiaUrl(site, title):
    # Resolve *title* on the wikia *site* and return a URL-quoted link to
    # the best-matching page; prints an error and returns None on failure.
    try:
        search = wikia.search(site, title)
        title = search[0]
        page = wikia.page(site, title)
        url = page.url
        url = url.replace(" ", "_")
        return urllib2.quote(url, safe="http://")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  (urllib2/print-statement: Python 2 code.)
        print "api anywikiurl error"
Esempio n. 4
0
def wikia_search(string, charsOnly):
    """Search the 100% Orange Juice wikia for *string* and return the hits
    that are known cards.

    Args:
        string: The search text.
        charsOnly: When truthy, keep only cards of type 'Character'.

    Returns:
        list: Matching card names (stripped), possibly empty.
    """
    result_list = []
    try:
        search_results = wikia.search('onehundredpercentorangejuice', string,
                                      3)
    except ValueError:  # wikia.search() raises ValueError on zero results
        return result_list
    # The try block now covers only the call that can raise; the filtering
    # below cannot raise ValueError.
    for result in search_results:
        key = result.lower().strip()
        if key in cards:  # membership test on the dict, not .keys()
            if not charsOnly or cards[key]['type'] == 'Character':
                result_list.append(result.strip())
    return result_list
Esempio n. 5
0
    def main(self):
        """
        Search for an article and return a short excerpt.
        """
        hits = wikia.search("Runescape", self.params.title)

        # Only proceed when the search produced a non-empty list of titles.
        if isinstance(hits, list) and len(hits) > 0:
            article = wikia.page("Runescape", hits[0])

            print("- " + article.title + "\n")

            # `--more` switches from the short summary to the full content.
            body = article.content if self.params.more else article.summary
            print(body + "\n")
            print(article.url)
Esempio n. 6
0
    async def hpwikia(self, *searchitems):
        """Look up a Harry Potter wikia article and post it as an embed."""
        query = " ".join(searchitems)
        top_hit = wikia.search("harrypotter", query)[0]
        description = wikia.summary("harrypotter", top_hit)
        article = wikia.page("harrypotter", top_hit)
        # Wikia URLs may contain spaces; underscores keep the link clickable.
        link = article.url.replace(' ', '_')
        images = article.images
        # Fall back to a placeholder crest when the page has no images.
        if images == []:
            thumb = "https://upload.wikimedia.org/wikipedia/commons/e/e5/Coat_of_arms_placeholder_with_question_mark_and_no_border.png"
        else:
            thumb = images[-1]

        embed = discord.Embed(title=article.title, url=link, description=description)
        embed.set_thumbnail(url=thumb)
        await self.client.say(embed=embed)
Esempio n. 7
0
    async def search_wiki(self, ctx: commands.Context,
                          val: typing.Optional[int] = 1,
                          *, arg):
        """Send links for up to *val* (max 5) The Division wiki pages
        matching *arg*; replies with an error message when nothing matches.
        """
        if val <= 5:
            try:
                query = str(arg)
                search = wikia.search("thedivision", query)

                # Clamp to the number of hits actually returned so a short
                # result list no longer raises an uncaught IndexError.
                for i in range(min(val, len(search))):
                    page = wikia.page("thedivision", search[i])
                    url = page.url.replace(' ', '_')
                    await ctx.send(f"<{url}>")
            except ValueError:
                await ctx.send("No results found")
        else:
            await ctx.send("Max of 5 results allowed.")
Esempio n. 8
0
def getWikiaUrl(site, title):
    # Resolve *title* on the aliased wikia *site* and return a URL-quoted
    # link to the best match; prints an error and returns None on failure.
    # Chat shorthand -> real wikia sub-domain.
    aliases = {
        "lolwiki": "leagueoflegends",
        "rswiki": "2007.runescape",
        "hswiki": "hearthstone",
        "rsfi": "fi.runescape",
    }
    try:
        site = aliases.get(site, site)
        search = wikia.search(site, title)
        title = search[0]
        page = wikia.page(site, title)
        url = page.url
        url = url.replace(" ", "_")
        return urllib2.quote(url, safe="http://")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  (urllib2/print-statement: Python 2 code.)
        print "api wikiaurl error"
Esempio n. 9
0
def query_text(inline_query):
    """Answer a Telegram inline query with up to 10 wikia article links."""
    total = 10
    try:
        search_results = wikia.search(WIKI, inline_query.query, total)
        results = []

        for i, page_result in enumerate(search_results):
            try:
                page = wikia.page(WIKI, page_result)
            except Exception:
                # Narrowed from a bare `except:`.  Stop at the first page
                # that fails to load, keeping the results gathered so far.
                break

            title, url = page.title, page.url
            url = url.replace(' ', '%20')  # percent-encode spaces for Telegram
            results.append(telebot.types.InlineQueryResultArticle(str(i), title, url))

        BOT.answer_inline_query(inline_query.id, results)

    except Exception as ex:
        print(ex)
Esempio n. 10
0
def get_wiki_page(
    search_term
):  # Get the Wikia page for a particular search term. In this case, it gets the best 1.
    """Return the best-matching WikiaPage for *search_term*, or None when
    the search fails or no usable title can be determined.
    """
    try:
        list_of_results = wikia.search(sub_wikia='choices-stories-you-play',
                                       query=search_term,
                                       results=5)
    except ValueError:  # This search for this term failed.
        return None

    if not list_of_results:
        # Defensive: an empty result list would raise IndexError below.
        return None

    best_result = list_of_results[0]
    if " " not in best_result:  # This is a single title article, which causes problems.
        best_result = get_wiki_page_google(search_term)
        if best_result is None:
            # NOTE(review): assumes the Google helper signals failure with
            # None — confirm against its implementation.
            return None

    # Return it as an WikiaPage object.
    best_result_object = wikia.page(sub_wikia='choices-stories-you-play',
                                    title=best_result)

    return best_result_object
Esempio n. 11
0
def query_text(inline_query):
    """Answer a Telegram inline query with up to 10 wikia article links."""
    total = 10
    try:
        search_results = wikia.search(WIKI, inline_query.query, total)
        results = []

        for i, page_result in enumerate(search_results):
            try:
                page = wikia.page(WIKI, page_result)
            except Exception:
                # Narrowed from a bare `except:`.  Stop at the first page
                # that fails to load, keeping the results gathered so far.
                break

            title, url = page.title, page.url
            url = url.replace(' ', '%20')  # percent-encode spaces for Telegram
            results.append(telebot.types.InlineQueryResultArticle(str(i), title, url))

        BOT.answer_inline_query(inline_query.id, results)

    except Exception as ex:
        print(ex)
Esempio n. 12
0
 def search(self, searchTerm: str):
     '''Return the raw list of search results from this object's wikia.'''
     hits = wikia.search(self.wiki, searchTerm)
     return hits
Esempio n. 13
0
def get_wiki():
    """Twilio SMS webhook: parse an incoming message of the form
    "<wiki> <context> <query>", run the lookup, and reply via TwiML,
    splitting long answers into multiple SMS "pages".
    """
    resp = twiml.Response()
    sent_message = request.values.get('Body', None)

    # Initialize
    args = sent_message.split(" ",2);
    wiki = args[0]
    context = ""
    query = ""
    message = ""

    # If the user only types one word, this catches it and won't break the script
    if( len(args) > 1 ):
        context = args[1]
        if( len(args) > 2 ):
            query = args[2]


    # Dummy-free if statement
    # ? is used instead of help because the twilio trial doesn't allow access to help
    if( sent_message.lower() == "?" or sent_message.lower() == "'?'" ):
        message = """To search wikis, enter one of the following commands:
  'wiki' search 'query',
  'wiki' summary 'query',
  'wiki' toc 'query',
  'wiki' section 'toc' 'query',
  'wiki' full 'query',
  'wiki' url 'query'. 
'wiki' is the wiki you are searching in, for example wikipedia or marvel. 
'query' is the string you are searching for.
'toc' is the specific table of contents to read.
Please note that text over 1000 characters will be split into multiple messages. Sections are case-sensitive."""
    elif( wiki.lower() == "wikipedia" ):
        message = wiki_pedia(context, query)
    else:
        try:
            # Now this is what I call a true hack. This will check to see if a wiki exists
            # by searching for the common letter 'e'. If it cannot find it, then it is very
            # likely that the wikia either does not exist or is so small it's irrevelant.
            # There may be an edge case where a wikia purposefully never uses the letter 'e'.
            wikia.search(wiki, "e")

            message = wiki_a(wiki, context, query)
        except:
            message = "Invalid wiki. Type '?' for help."

    # NOTE(review): image replies are assumed to be attached elsewhere
    # (see wiki_a's image branch) — confirm the resp wiring actually works.
    if( context.lower() == "image"):
        return str(resp)

    #client = TwilioRestClient(ACC_SID, AUTH_TOKEN)
    # Cuts messages so that they don't exceed the twilio MMS 1600 character limit.
    i = 0
    while( len(message) > 1280 ):
        # 1280 is 160 (max SMS message) times 8.
        client = TwilioRestClient(ACC_SID, AUTH_TOKEN)
        sms = client.messages.create(
            to=request.values.get('From', None), 
            from_=PHONE_NUMBER, 
            body=message[:1280]+ "\n PAGE: " + str(i+1), 
        )
        message = message[1280:]
        print(str(i+1))
        i+=1#time.sleep(60)
    print("Returning")
    if (i > 0):
        message += "\n LAST PAGE"
    resp.message(message)    
    return str(resp)
Esempio n. 14
0
    async def displaypage(self, message_object, args):
        """Display a wikia page as a Discord embed.

        *args* is "wikia/page" or "wikia/page/section".  If the direct page
        lookup fails, fall back to a search, show the result list, and let
        the user pick one by replying with its number.  The bracketed
        print() calls are debug trace markers.
        """
        elements = args.split("/")
        if len(elements) > 1:
            try:
                print ("[a]")
                status = await self.pm.client.send_message(message_object.channel, ':information_source:`Looking up wikia page~`'.format())
                print ("[b]")
                await self.pm.client.send_typing(message_object.channel)
                print ("[c]")
                # Direct lookup: elements[0] is the wikia, elements[1] the page.
                page = wikia.page(elements[0], elements[1])
                url = page.url
                print ("[d]")
                if len(elements) == 2:
                    print ("[e]")
                    header = '{0} > {1}'.format(elements[0], elements[1])
                    content = page.summary
                    print ("[e.5]")
                else:
                    print ("[f]")
                    # Three elements: also pull a specific section's text.
                    header = '{0} > {1} > {2}'.format(elements[0], elements[1], elements[2])
                    content =  page.section(elements[2])
                    print ("[f.5]")
            except:
                # Direct lookup failed — offer search results instead.
                try:
                    print ("[search]")
                    search = wikia.search(elements[0], elements[1])
                    print ("[search.1]")
                    results = ""
                    i = 1
                    print ("[search.2]")
                    for x in search:
                        results = results + "{0}: {1}\n".format(i, x)
                        i += 1
                    print ("[search.3]")
                    await self.pm.client.edit_message(status, ":information_source:**No page found, here's the search results instead**\n```{0}```\n*Select the page you want to view by responding with a number*".format(results))
                    print ("[search.4]")
                    # Wait for the user to reply with a 1-based result number.
                    response = await self.pm.client.wait_for_message(author=message_object.author)
                    print ("[search.7]")
                    try:
                        page = wikia.page(elements[0], search[int(response.content) - 1])
                        print ("[search.8]")
                        header = '{0} > {1}'.format(elements[0], search[int(response.content) - 1])
                        print ("[search.9]")
                        content = page.summary
                        print ("[search.10]")
                        url = page.url
                        print ("[search.11]")
                    except:
                        await self.pm.client.edit_message(status, ":exclamation:`Invalid Selection!`".format())
                        return
                except:
                    await self.pm.client.edit_message(status, ":exclamation:`Invalid Wikia or no results found!`".format())
                    return

            
            print ("[display.1]")
            # Build a comma-separated list of the page's section names.
            tags = ""
            for x in page.sections:
                tags = tags + x + ', '
            print ("[display.1.5]")
            # Truncate long summaries so the embed stays within Discord limits.
            if len(content) > 1000:
                content = content[:1000]+"..."
            print ("[display.2]")
            em = discord.Embed(title='', description="**Summary**\n{0}\n\n**Sub Sections**\n{1}\n\n**Link**\n{2}".format(content, tags, url), colour=0x007AFF, url=url)
            em.set_author(name=header)
            em.set_footer(text="Noku-wikia version 1.0.5", icon_url=self.pm.client.user.avatar_url)
            print ("[display.3]")
            if len(page.images) > 0:
                em.set_thumbnail(url=page.images[0])

            #print(content)
            print ("[display.4]")
            try:
                await self.pm.client.send_message(message_object.channel, embed=em)
            except:
                # Embed send failed (e.g. missing permission) — fall back to text.
                await self.pm.client.send_message(message_object.channel, "***{3}***\n\n**Summary**\n{0}\n\n**Sub Sections**\n{1}\n\n**Link**\n{2}".format(content, tags, url, header))
            try:
                # Clean up the status and selection messages; best-effort.
                # NOTE(review): `response` is unbound when the direct lookup
                # succeeded — the NameError is swallowed by this except.
                await self.pm.client.delete_message(status)
                await self.pm.client.delete_message(response)
            except:
                pass
Esempio n. 15
0
                         sliced_comment.index(word)):
                     missingTerm += sliced_comment[
                         sliced_comment.index(word) +
                         i].lower() + " "
         # If a tech keyword has been checked for, stop checking the comment
         else:
             break
     # Make sure the bot only replies to comments that have called it
     else:
         commentCommandError = False
 # Term could not be found in database, respond appropriately
 if defineKeyword == True and techKeyword == False:
     wikiSearchSuccesful = True
     try:
         wikiSearchResult = wikia.search("smashbros",
                                         missingTerm,
                                         results=10)[0]
         wikiSearchSummary = wikia.summary("smashbros",
                                           wikiSearchResult,
                                           chars=500,
                                           redirect=True)
         wikiSearchURL = wikia.page("smashbros",
                                    title=wikiSearchResult,
                                    pageid=None,
                                    redirect=True,
                                    preload=False).url
         #Make links that end with ) work
         if wikiSearchURL[len(wikiSearchURL) - 1] == ')':
             print("Fixing wiki link")
             wikiSearchURL = wikiSearchURL[:len(
                 wikiSearchURL
def process_summary_call(post):
  """Parse a "wikiabot, what is X" style Reddit comment, resolve the term on
  the wikia, and return (title, comment_prefix) for the replier, or
  (False, False) when no reply should be made.

  NOTE(review): Python-2 code (`filter` used as eager list, %-formatting on
  bytes).  `body` on the next line is undefined in this scope, and the
  assignment shadows the `wikia` module with find_link's return value —
  confirm against the original module this was scraped from.
  """
  #special("__________________________________________________")
  #special("SUMMARY CALL: %s"%post.id)
  wikia = find_link(body)
  replacedbody = post.body.lower().replace('wikiabot','___uawb___wikiabot')
  # Extract the query term from one of three phrasings; `term` stays unbound
  # when none of them match (NameError below — pre-existing behavior).
  if re.search(r'wikiabot.\s*tell\s.{1,23}\sabout\s+(an\s+|a\s+|the\s+|)(.*?)$',replacedbody):
    post_body = re.sub(r'wikiabot.\s*tell\s.{1,23}\sabout\s+(an\s+|a\s+|the\s+|)(.*?)$',r'\2',replacedbody).split('___uawb___')[1].split('.')[0].split('?')[0]
    term = post_body.strip()
  elif re.search(r'wikiabot.\s*wh.{1,3}(\'s|\s+is|\s+are|\s+was)\s+(an\s+|a\s+|the\s+|)(.*?)$',replacedbody):
    post_body = re.sub(r'wikiabot.\s*wh.{1,3}(\'s|\s+is|\s+are|\s+was)\s+(an\s+|a\s+|the\s+|)(.*?)$',r'\3',replacedbody).split('___uawb___')[1].split('.')[0].split('?')[0]
    term = post_body.strip()
  elif re.search("\?\-.*\-\?",replacedbody):
    term = re.search("\?\-.*\-\?",post.body.lower()).group(0).strip('?').strip('-').strip()

  special("SUMMARY CALL: %s @ %s"%(filter(lambda x: x in string.printable, term),post.id))
  # Easter-egg / junk terms get no reply.
  if term.lower().strip() == 'love':
    #post_reply('*Baby don\'t hurt me! Now seriously, stop asking me about love so many times! O.o What were we discussing about in this thread again?*',post)
    return(False,False)
  #if term.lower().strip() == 'wikiabot':
    #post_reply('*Me! I know me.*',post)
    # NOTE(review): the guarding `if` above is commented out, so because of
    # indentation this return still belongs to the 'love' branch and is
    # unreachable after the return above.
    return(False,False)
  if term.lower().strip() == 'reddit':
    #post_reply('*This place. It feels like home.*',post)
    return(False,False)
  if term.strip().__len__() < 2 or term == None:
    log("EMPTY TERM")
    return(False,False)
  try:
    title = wikia.page(sub_wikia, term,).title
    # If it is the main page, don't try to summarise it
    if re.search("[M,m]ain_[P,p]age", title) or re.search("[W,w]iki", title):
      return (False, False)
    if title.lower() == term:
      bit_comment_start = ""
    elif title.lower() != term:
      try:
        # Probe without redirect-following to detect whether the match
        # came from a redirect.
        discard = wikia.page(sub_wikia, term,redirect=False).title
      except Exception as e:
        if re.search('resulted in a redirect',str(e)):
          bit_comment_start = "*\"" + term.strip() + "\" redirects to* "
    else:
      bit_comment_start = "*Nearest match for* ***" + term.strip() + "*** *is* "
    # A '#' in the title means the match is a section of an article.
    if re.search(r'#',title):
      url = wikia.page(sub_wikia, title.split('#')[0],).url
      sectionurl =  url + "#" + title.split('#')[1]
      comment = "*Nearest match for* ***" + term.strip() + "*** *is the section ["+title.split('#')[1]+"]("+sectionurl.replace(')','\)')+") in article ["+title.split('#')[0]+"]("+url+").*\n\n---\n\n"
      post_reply(comment,post)
      log("RELEVANT SECTION SUGGESTED: %s"%filter(lambda x: x in string.printable, title))
      return (False,False)
    url_string = title
    log("INTERPRETATION: %s"%filter(lambda x: x in string.printable, title))
    return (url_string,bit_comment_start)
  except Exception as e:
    # Disambiguation pages raise with 'may refer to:' in the message.
    if bool(re.search('.*may refer to:.*',filter(lambda x: x in string.printable, str(e)))):
      deflist = ">Definitions for few of those terms:"
      for idx, val in enumerate(filter(lambda x: x in string.printable, str(e)).split('may refer to: \n')[1].split('\n')):
        deflist = deflist + "\n\n>1. **"+val.strip()+"**: "+ wikia.summary(sub_wikia, val,sentences=1)
        if idx > 3:
          break
      summary = "*Oops,* ***"+term.strip()+"*** *landed me on a disambiguation page.*\n\n---\n\n"+deflist+"\n\n---\n\n"
      log("ASKING FOR DISAMBIGUATION")
    else:
      log("INTERPRETATION FAIL: %s"%filter(lambda x: x in string.printable, term))
      try:
        # Fall back to an exact-phrase search suggestion.
        terms = "\""+term+"\""
        suggesttitle = str(wikia.search(sub_wikia, terms,results=1)[0])
        log("SUGGESTING: %s"%filter(lambda x: x in string.printable, suggesttitle))
        if suggesttitle.lower() == term:
          bit_comment_start = ""
        else:
          bit_comment_start = "*Nearest match for* ***" + term.strip() + "*** *is* "
        # Strip a trailing ')' that has no matching '('.
        if str(suggesttitle).endswith(')') and not re.search('\(',str(suggesttitle)):
          suggesttitle = suggesttitle[0:--(suggesttitle.__len__()-1)]
        return (str(suggesttitle),bit_comment_start)
      except:
        # Last resort: retry the page lookup and use whatever title comes back.
        trialtitle = wikia.page(sub_wikia, term,).title
        if trialtitle.lower() == term:
          bit_comment_start = ""
        else:
          bit_comment_start = "*Nearest match for* ***" + term.strip() + "*** *is* "
        log("TRIAL SUGGESTION: %s"%filter(lambda x: x in string.printable, trialtitle))
        if str(trialtitle).endswith(')') and not re.search('\(',str(trialtitle)):
          trialtitle = trialtitle[0:--(trialtitle.__len__()-1)]
        return (str(trialtitle),bit_comment_start)
    # Reached only from the disambiguation branch above.
    post_reply(summary,post)
    return (False,False)
       continue
     except Exception as e:
       if bool(re.search('.*may refer to:.*',filter(lambda x: x in string.printable, str(e)))):
         deflist = ">Definitions for few of those terms:"
         for idx, val in enumerate(filter(lambda x: x in string.printable, str(e)).split('may refer to: \n')[1].split('\n')):
           deflist = deflist + "\n\n>1. **"+val.strip()+"**: "+ wikia.summary(sub_wikia, val,sentences=1)
           if idx > 3:
             break
         #comment = "*Oops,* ***"+process_brackets_syntax(url_string).strip()+"*** *landed me on a disambiguation page.*\n\n---"+deflist+"\n\n---\n\nAnd the remaining list:\n\n"+str(e).replace('\n','\n\n>')+"\n\n---\n\n"
         summary = "*Oops,* ***"+process_brackets_syntax(url_string).strip()+"*** *landed me on a disambiguation page.*\n\n---\n\n"+deflist+"\n\n---\n\n"
         log("ASKING FOR DISAMBIGUATION")
       else:
         log("INTERPRETATION FAIL: %s"%term)
         try:
           terms = "\""+term+"\""
           suggest = wikia.search(sub_wikia, terms,results=1)[0]
           trialsummary = wikia.summary(sub_wikia, suggest,)
           comment = "*Nearest match for* ***"+term.trim()+"*** *is* ***"+suggest+"*** :\n\n---\n\n>"+trialsummary+"\n\n---\n\n"
           log("SUGGESTING %s"%suggest)
         except:
           comment = "*Sorry, couldn't find a wikia article about that or maybe I couldn't process that due to Wikia server errors.*\n\n---\n\n"
           log("COULD NOT SUGGEST FOR %s"%term)
         post_reply(comment,post)
         continue
   continue
 data = strip_wiki(data)
 data = re.sub("Cite error: There are ref tags on this page, but the references will not show without a \{\{reflist\}\} template \(see the help page\)\.", '', data)
 #truncateddata = truncate(data,1000)
 if data.__len__() < 50:
   log("TOO SMALL INTRODUCTION PARAGRAPH")
   continue
Esempio n. 18
0
def GetComments():
    """Scan recent subreddit comments for "!spongebot <term>" requests and
    reply with a Spongebob-wiki summary (or episode links) for the term.

    Reads module-level `subreddit` and persists replied-to comment ids via
    fetchComments()/addComment().  NOTE(review): `reply` is reset to "" only
    once before the loop, so a reply built for one comment can leak into the
    next iteration — confirm intended.
    """
    #####  Get the comments we've replied to.  #####
    posts_replied_to = fetchComments()
    print(posts_replied_to)
    reply = ""

    #####  Get the comments in the subreddit  #####
    for comment in subreddit.comments(limit=20):
        #####  Check to see if the post has already been replied to, or if it even needs to be  #####
        if str(
                comment.id
        ) in posts_replied_to or not "!spongebot " in comment.body or ">!spongebot " in comment.body:
            continue
        else:
            #####  New Comment that we need to reply to  #####
            comment_replied = False
            print("New Comment - " + comment.id)

            #####  Replace the wake word  #####
            searchTerm = comment.body.replace("!spongebot ", "")

            #####  See if we can find out what Season/Episode From the String  #####
            #####  Returns False, [True, seasion, episode], or ["episode name", season, episode]  #####
            episode_info = findEpisode(searchTerm)
            if not episode_info == False and not episode_info == None:
                if episode_info[0] == True:
                    #####  We looked, but there wasn't that episode in the CSV  #####
                    reply = "[Sorry](https://vignette.wikia.nocookie.net/spongefan/images/3/3c/Squidward_the_Loser_%3AP.jpg/revision/latest?cb=20130120163943), It doesn't look like theres a Season " + str(
                        episode_info[1]) + " Episode " + str(
                            episode_info[2]) + ", try an episode name."
                else:
                    #####  Let's split the episodes into their respective segments  #####
                    print(episode_info)
                    episodes = episode_info[0].split("/")
                    print(episodes)

                    #####  It's not a special, so theres more than 1 episode  #####
                    if len(episodes) > 1:
                        if not episode_info[3] == None:
                            segment = episode_info[3]
                        else:
                            segment = 0

                        print("Found Segment: " + str(segment))
                        if not segment == 0:
                            episode = episodes[segment - 1]
                            print("Extracted Episode Name: " + episode)
                            searchTerm = episode
                        else:
                            ##### Return the Episode Names with their respective URLS  #####
                            reply = "Here's What I found on for " + searchTerm + ": \n\n ------------------------------------ \n\n"
                            # Map 1 -> 'a', 2 -> 'b', ... for segment suffixes.
                            d = dict(enumerate(string.ascii_lowercase, 1))
                            i = 1
                            for episode in episodes:
                                episode_url = wikia.page(
                                    "Spongebob",
                                    wikia.search("Spongebob", episode)[0]).url
                                reply += "Season " + str(
                                    episode_info[1]
                                ) + " Episode " + str(episode_info[2]) + d[
                                    i] + ": [" + episode + "](" + urllib.parse.quote(
                                        episode_url).replace("%3A",
                                                             ":") + ") \n\n"
                                i = i + 1

                    else:
                        #####  If there's only one, than we can just search that name  #####
                        print(episodes)
                        searchTerm = episodes[0]
                        print("Found Episode: " + searchTerm)

            #####  If we don't already know what they want  #####
            if reply == "":
                #####  Look it up  #####
                print(searchTerm)
                search = wikia.search("Spongebob", searchTerm)
                print("Search returned - " + str(search))

                #####  Eh, first one looks good  #####
                closest = search[0]

                #####  If there's a gallery, we can likely get that page  #####
                if "gallery" in closest.lower():
                    closest = closest.replace(" (gallery)", "")

                #####  Get the Summary  #####
                summary = wikia.page("Spongebob", closest).content.replace(
                    u"\u2018", "'").replace(u"\u2019", "'").replace(
                        "\\xa0", " ").replace("0xc2", "").replace("\\xao", "")

                #####  Header for our response  #####
                reply = "Here's What I found on the Spongebob Wiki for [" + searchTerm + "](" + urllib.parse.quote(
                    wikia.page("Spongebob", closest).url).replace(
                        "%3A", ":"
                    ) + "): \n\n ------------------------------------ \n\n"

                #####  Let's maintain the lines  #####
                paragraphs = summary.split("\n")
                if len(paragraphs) < 3:
                    #####  There's less than 3 paragraphs, so we'll just set it to the max  #####
                    print("Wiki only returned " + str(len(paragraphs)))
                    endIndex = len(paragraphs)
                else:
                    #####  Otherwise, we only want 3  #####
                    endIndex = 3

                ##### If theres no summary, just return an error  #####
                if endIndex >= 1:
                    #####  Otherwise, return whatever we can  #####
                    for i in range(0, endIndex):
                        paragraph = paragraphs[i].strip()
                        reply += paragraph + "\n\n"
                else:
                    reply = "[Sorry](https://vignette.wikia.nocookie.net/spongefan/images/3/3c/Squidward_the_Loser_%3AP.jpg/revision/latest?cb=20130120163943), I didn't find anything regarding " + searchTerm + ", I usually work best with episode names."

            #####  Footer for our response  #####
            reply += "\n\n ------------------------------------ \n\n ^I'm ^a ^[bot](https://vignette.wikia.nocookie.net/spongebob/images/5/54/Robot_Spongebob2.jpg/revision/latest?cb=20130416211248), ^and ^this ^action ^was ^preformed ^automatically. \n\n Got a question for the creator? Message the evil genius [here](https://www.reddit.com/message/compose?to=pizzaface97&subject=SpongeBot2000%20Question)"
            print(reply)

            #####  Now let's try to post the comment  #####
            try:
                comment.reply(reply)
                #pass
            except praw.exceptions.APIException:
                #####  There was an issue, let's stop and try again later  #####
                print("Rate Limited -- Ending")
                comment_replied = False
            else:
                #####  We posted our reply  #####
                comment_replied = True

            #####  Log it, either way  #####
            if comment_replied == True:
                print("Comment Posted - " + comment.id)
                output = "Comment Posted - " + comment.id
                addComment(comment.id, True)
            else:
                print("No Comment Posted")
                output = "No Comment Posted"
                continue
Esempio n. 19
0
def wiki_a(wiki, context, query):
    """Dispatch a wikia command and return the reply text.

    Args:
        wiki: Sub-wikia name to query.
        context: Command word (search/summary/toc/section/full/image/url).
        query: The search string (for 'section', prefixed with the section
            number, e.g. "3 Some Page").

    Returns:
        str: The message to send back (or ERROR on failure).
    """
    # Initialize
    message = ""

    if( context.lower() == "search" ):
        message = ",  \n".join(wikia.search(wiki, query))
    elif( context.lower() == "summary" ):
        try:
            message = wikia.summary(wiki, query)
        except Exception:
            message = ERROR
    elif( context.lower() == "toc" ):
        try:
            sec_list = wikia.page(wiki, query).sections
            # Throws together a list of numbered sections for section use below
            message = sec_list[0] + " (1)"
            for i, name in enumerate(sec_list[1:]):
                message += ",  \n" + name + " (" + str(i+2) + ")"
        except Exception:
            message = ERROR
    elif( context.lower() == "section" ):
        try:
            # Split off the leading section number before looking up the page.
            store = query.split(" ",1)
            sec_num = int(store[0])
            query = store[1]
            # FIX: this branch queried wikipedia and ignored `wiki`, unlike
            # the toc/full/url branches; it now uses the requested wikia.
            page = wikia.page(wiki, query)
            sec_list = page.sections
            message = page.section(sec_list[sec_num])
            # FIX: `NONE` was an undefined name, so this check always raised
            # NameError and the branch returned ERROR.
            if( message is None ):
                # This will happen often because the API is pretty shit
                message = "Unable to grab the section text."
        except Exception:
            message = ERROR
    elif( context.lower() == "full" ):
        try:
            message = wikia.page(wiki, query).content
        except Exception:
            message = ERROR
    elif( context.lower() == "image" ):
        try:
            x = ""
            # NOTE(review): this branch queries wikipedia (not the wikia
            # library) and relies on a module-level `resp` — confirm intended.
            img_list = wikipedia.page(query).images
            # Removing first letter to remove capital because wikipedia
            # is anal about uppercase and lowercase
            for i, x in enumerate(img_list):
                if query[1:] in x:
                    break
            if x == "":
                message = "Image could not be found."
            else:
                resp.message().media(x)
        except Exception:
            # FIX: the bare expression `ERROR` had no effect; assign it.
            message = ERROR
    elif( context.lower() == "url" ):
        try:
            message = wikia.page(wiki, query).url
        except Exception:
            message = ERROR
    else:
        message = "Invalid context. Type '?' for help."

    return message
Esempio n. 20
0
 def test_wikia_search(self):
     # The card regex should capture the bracketed name, and the wikia
     # search should resolve that name to the 'QP' card as its top hit.
     m = re.search(cards.card_regex, '[][ qp]')
     extracted = m.groups()[0].strip()
     self.assertEqual(extracted, '][ qp')
     hits = wikia.search('onehundredpercentorangejuice', extracted, 2)
     self.assertEqual(hits[0].lower(), 'qp')
Esempio n. 21
0
 def isearch(self, searchTerm: str):
     '''Lazily yield a wikia page object for each search result title.'''
     for title in wikia.search(self.wiki, searchTerm):
         yield wikia.page(self.wiki, title)