Example #1
0
def get_wikia(wikia_title, query):
    """Fetch the wikia summary for *query* on the wiki *wikia_title*.

    Follows a single ``REDIRECT <target>`` stub response by re-querying
    with the redirect target.  On a wikia API error the exception is
    printed and ``None`` is returned (best-effort behaviour).
    """
    try:
        response = wikia.summary(wikia_title, query)
        if response.startswith('REDIRECT '):
            # Follow the redirect once with the stripped target title.
            return wikia.summary(wikia_title, response.replace('REDIRECT ', ''))
        # Fixed: the original unconditionally issued a second, redundant
        # API call here instead of returning the response it already had.
        return response
    except wikia.WikiaError as e:
        print(e)
Example #2
0
    async def hpwikia(self, *searchitems):
        """Search the Harry Potter wikia and post the top hit as an embed.

        *searchitems* are joined into one query string; the first search
        result's summary, URL and last listed image (if any) are sent to
        the channel via ``self.client.say``.
        """
        searchitem = " ".join(searchitems)
        found = wikia.search("harrypotter", searchitem)[0]
        summary = wikia.summary("harrypotter", found)
        page = wikia.page("harrypotter", found)
        # Wikia article URLs may contain literal spaces; underscores keep
        # the link clickable inside the Discord embed.
        clear_url = page.url.replace(' ', '_')
        images = page.images
        # Fixed idiom: a falsy check replaces the exact `== []` comparison,
        # so an empty tuple of images is handled as well.
        if images:
            image = images[-1]
        else:
            # Placeholder thumbnail for articles without any images.
            image = "https://upload.wikimedia.org/wikipedia/commons/e/e5/Coat_of_arms_placeholder_with_question_mark_and_no_border.png"

        embed = discord.Embed(title=page.title, url=clear_url, description=summary)
        embed.set_thumbnail(url=image)
        await self.client.say(embed=embed)
Example #3
0
                         i].lower() + " "
         # If a tech keyword has been checked for, stop checking the comment
         else:
             break
     # Make sure the bot only replies to comments that have called it
     else:
         commentCommandError = False
 # NOTE(review): this fragment is cut mid-statement above; defineKeyword,
 # techKeyword, missingTerm and commentCommandError are defined outside
 # this view -- do not assume their types from here.
 # Term could not be found in database, respond appropriately
 if defineKeyword == True and techKeyword == False:
     wikiSearchSuccesful = True
     try:
         # Take the top wiki search hit for the unknown term, then fetch a
         # short summary and the article URL for it.
         wikiSearchResult = wikia.search("smashbros",
                                         missingTerm,
                                         results=10)[0]
         wikiSearchSummary = wikia.summary("smashbros",
                                           wikiSearchResult,
                                           chars=500,
                                           redirect=True)
         wikiSearchURL = wikia.page("smashbros",
                                    title=wikiSearchResult,
                                    pageid=None,
                                    redirect=True,
                                    preload=False).url
         #Make links that end with ) work
         # Escapes a trailing ')' so Markdown link syntax is not broken.
         if wikiSearchURL[len(wikiSearchURL) - 1] == ')':
             print("Fixing wiki link")
             wikiSearchURL = wikiSearchURL[:len(
                 wikiSearchURL
             ) - 1] + '\\' + wikiSearchURL[len(wikiSearchURL) - 1:]
             #print wikiSearchURL
     # NOTE(review): bare `except:` swallows every error (including
     # KeyboardInterrupt) and leaves the wikiSearch* variables unbound.
     except:
         print("!! Exception thrown when searching the Wiki!!")
def process_summary_call(post):
  """Parse a bot-summoning comment and resolve the wikia article it asks about.

  Returns a ``(url_string, bit_comment_start)`` tuple on success, or
  ``(False, False)`` when the call should be ignored or was answered inline.
  Relies on several module-level names not visible in this chunk:
  ``find_link``, ``special``, ``log``, ``post_reply``, ``sub_wikia``,
  ``body``, plus the ``re``, ``string`` and ``wikia`` imports.
  """
  #special("__________________________________________________")
  #special("SUMMARY CALL: %s"%post.id)
  # NOTE(review): this assignment shadows the imported ``wikia`` module that
  # is called below (wikia.page / wikia.summary), and ``body`` is not defined
  # in this scope -- looks like a bug (probably meant find_link(post.body)
  # bound to a different name).
  wikia = find_link(body)
  # Sentinel marker lets the code split off everything before the bot's name.
  replacedbody = post.body.lower().replace('wikiabot','___uawb___wikiabot')
  # Three trigger grammars: "tell <someone> about X", "what is/are/was X",
  # and an explicit ?-X-? marker.
  if re.search(r'wikiabot.\s*tell\s.{1,23}\sabout\s+(an\s+|a\s+|the\s+|)(.*?)$',replacedbody):
    post_body = re.sub(r'wikiabot.\s*tell\s.{1,23}\sabout\s+(an\s+|a\s+|the\s+|)(.*?)$',r'\2',replacedbody).split('___uawb___')[1].split('.')[0].split('?')[0]
    term = post_body.strip()
  elif re.search(r'wikiabot.\s*wh.{1,3}(\'s|\s+is|\s+are|\s+was)\s+(an\s+|a\s+|the\s+|)(.*?)$',replacedbody):
    post_body = re.sub(r'wikiabot.\s*wh.{1,3}(\'s|\s+is|\s+are|\s+was)\s+(an\s+|a\s+|the\s+|)(.*?)$',r'\3',replacedbody).split('___uawb___')[1].split('.')[0].split('?')[0]
    term = post_body.strip()
  elif re.search("\?\-.*\-\?",replacedbody):
    term = re.search("\?\-.*\-\?",post.body.lower()).group(0).strip('?').strip('-').strip()

  # NOTE(review): if none of the three patterns matched, ``term`` is unbound
  # here (NameError).  Also, under Python 3 filter(...) inside a %s format
  # renders as "<filter object ...>"; this code appears written for Python 2,
  # where filter() on a str returned a printable-only str.
  special("SUMMARY CALL: %s @ %s"%(filter(lambda x: x in string.printable, term),post.id))
  if term.lower().strip() == 'love':
    #post_reply('*Baby don\'t hurt me! Now seriously, stop asking me about love so many times! O.o What were we discussing about in this thread again?*',post)
    return(False,False)
  #if term.lower().strip() == 'wikiabot':
    #post_reply('*Me! I know me.*',post)
    # NOTE(review): the guarding "if ... == 'wikiabot'" above is commented
    # out, so this return is an unreachable duplicate inside the 'love'
    # branch rather than a separate 'wikiabot' early-out.
    return(False,False)
  if term.lower().strip() == 'reddit':
    #post_reply('*This place. It feels like home.*',post)
    return(False,False)
  # NOTE(review): the None check runs after methods were already called on
  # ``term`` (would have raised first); prefer ``term is None`` / len(term).
  if term.strip().__len__() < 2 or term == None:
    log("EMPTY TERM")
    return(False,False)
  try:
    title = wikia.page(sub_wikia, term,).title
    # If it is the main page, don't try to summarise it
    if re.search("[M,m]ain_[P,p]age", title) or re.search("[W,w]iki", title):
      return (False, False)
    if title.lower() == term:
      bit_comment_start = ""
    elif title.lower() != term:
      try:
        # Probe without redirects purely to detect whether the resolved
        # title came from a redirect; the value itself is discarded.
        discard = wikia.page(sub_wikia, term,redirect=False).title
      except Exception as e:
        if re.search('resulted in a redirect',str(e)):
          bit_comment_start = "*\"" + term.strip() + "\" redirects to* "
    # NOTE(review): unreachable -- the if/elif above already cover both
    # cases, so ``bit_comment_start`` can be left unbound (NameError at the
    # return below) when the title differs and no redirect was detected.
    else:
      bit_comment_start = "*Nearest match for* ***" + term.strip() + "*** *is* "
    # A '#' in the title means the match is a section of an article; answer
    # inline with a section link instead of returning a summary target.
    if re.search(r'#',title):
      url = wikia.page(sub_wikia, title.split('#')[0],).url
      sectionurl =  url + "#" + title.split('#')[1]
      comment = "*Nearest match for* ***" + term.strip() + "*** *is the section ["+title.split('#')[1]+"]("+sectionurl.replace(')','\)')+") in article ["+title.split('#')[0]+"]("+url+").*\n\n---\n\n"
      post_reply(comment,post)
      log("RELEVANT SECTION SUGGESTED: %s"%filter(lambda x: x in string.printable, title))
      return (False,False)
    url_string = title
    log("INTERPRETATION: %s"%filter(lambda x: x in string.printable, title))
    return (url_string,bit_comment_start)
  except Exception as e:
    # Disambiguation pages surface as an exception whose message lists the
    # candidate meanings; build a reply quoting the first few of them.
    if bool(re.search('.*may refer to:.*',filter(lambda x: x in string.printable, str(e)))):
      deflist = ">Definitions for few of those terms:"
      for idx, val in enumerate(filter(lambda x: x in string.printable, str(e)).split('may refer to: \n')[1].split('\n')):
        deflist = deflist + "\n\n>1. **"+val.strip()+"**: "+ wikia.summary(sub_wikia, val,sentences=1)
        if idx > 3:
          break
      summary = "*Oops,* ***"+term.strip()+"*** *landed me on a disambiguation page.*\n\n---\n\n"+deflist+"\n\n---\n\n"
      log("ASKING FOR DISAMBIGUATION")
    else:
      log("INTERPRETATION FAIL: %s"%filter(lambda x: x in string.printable, term))
      try:
        terms = "\""+term+"\""
        suggesttitle = str(wikia.search(sub_wikia, terms,results=1)[0])
        log("SUGGESTING: %s"%filter(lambda x: x in string.printable, suggesttitle))
        if suggesttitle.lower() == term:
          bit_comment_start = ""
        else:
          bit_comment_start = "*Nearest match for* ***" + term.strip() + "*** *is* "
        # NOTE(review): ``--x`` is double unary minus (== x); the slice just
        # drops the trailing ')'. An obscure way to write suggesttitle[:-1].
        if str(suggesttitle).endswith(')') and not re.search('\(',str(suggesttitle)):
          suggesttitle = suggesttitle[0:--(suggesttitle.__len__()-1)]
        return (str(suggesttitle),bit_comment_start)
      # NOTE(review): bare except -- any failure (including bugs above)
      # silently falls into this last-resort lookup.
      except:
        trialtitle = wikia.page(sub_wikia, term,).title
        if trialtitle.lower() == term:
          bit_comment_start = ""
        else:
          bit_comment_start = "*Nearest match for* ***" + term.strip() + "*** *is* "
        log("TRIAL SUGGESTION: %s"%filter(lambda x: x in string.printable, trialtitle))
        if str(trialtitle).endswith(')') and not re.search('\(',str(trialtitle)):
          trialtitle = trialtitle[0:--(trialtitle.__len__()-1)]
        return (str(trialtitle),bit_comment_start)
    # Only reachable via the disambiguation branch above, which sets
    # ``summary`` and does not return.
    post_reply(summary,post)
    return (False,False)
   url_string = get_url_string(post)
   log("__________________________________________________")
   log("LINK TRIGGER: %s"%post.id)
   bit_comment_start = ""
 # NOTE(review): fragment cut at both edges -- this `else:` pairs with an
 # `if` above the visible chunk, and the `continue` statements imply the
 # whole span lives inside a loop over posts.
 else:
   try:
     url_string = ""
     url_string, bit_comment_start = process_summary_call(post)
     # (False, False) from process_summary_call means "ignore this post".
     if url_string == False:
       continue
     url_string = str(url_string)
   except Exception as e:
     # Duplicate of the disambiguation handling inside
     # process_summary_call; here it is keyed on ``url_string`` (which may
     # still be "" when the call raised early) instead of the term.
     if bool(re.search('.*may refer to:.*',filter(lambda x: x in string.printable, str(e)))):
       deflist = ">Definitions for few of those terms:"
       for idx, val in enumerate(filter(lambda x: x in string.printable, str(e)).split('may refer to: \n')[1].split('\n')):
         deflist = deflist + "\n\n>1. **"+val.strip()+"**: "+ wikia.summary(sub_wikia, val,sentences=1)
         if idx > 3:
           break
       summary = "*Oops,* ***"+url_string.strip()+"*** *landed me on a disambiguation page.*\n\n---\n\n"+deflist+"\n\n---\n\n"
       log("ASKING FOR DISAMBIGUATION")
       post_reply(summary,post)
       continue
 if not url_string:
   continue
 article_name_terminal = None
 # NOTE(review): ``link`` is defined outside this view -- presumably the
 # matched wikia link for the post; confirm against the enclosing loop.
 sub_wikia = find_sub_wikia(link)
 # Screw it, I'm not digging through uncommented regexs
 url_string = url_string.replace("/", "")
 base_wikia_url = "https://" + sub_wikia + ".wikia.com/"
 is_section = False
 ### check for subheading in url string, process if present
Example #6
0
def LOTR_search(arg):
    """Look up *arg* on the Lord of the Rings wikia and return its summary."""
    return wikia.summary("lotr", arg)
Example #7
0
def anime_search(arg):
    """Look up *arg* on the Anime wikia and return its summary text.

    If the API answers with a "redirect <target>" stub, follow it once by
    summarising the redirect target instead.
    """
    info = wikia.summary("anime", arg)
    if info.startswith("redirect"):
        # Fixed: the original sliced exactly 8 characters ("redirect"),
        # leaving the separator (space/colon) glued to the target title;
        # strip it so the follow-up lookup gets a clean page name.
        info = wikia.summary("anime", info[len("redirect"):].strip())
    return info
Example #8
0
# -*- coding: utf-8 -*-
"""
Filename: hs_collections_organiser.py
Date created: Mon Aug 24 21:06:15 2020
@author: Julio Hong
Purpose: Read wikitable to generate an Excel file
Steps: 
"""

import pandas as pd
# NOTE(review): ``path`` is unused in this chunk -- may be used further down
# the file.
from os import path
# I thought this would be useful but actually not really. It can't even scrape all the content properly.
import wikia
from wikia import html

# To adjust the dataframe appearance
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 200)

# NOTE(review): return value discarded -- this call only exercises the API
# (a network round-trip) and has no other effect.
wikia.summary("Hypixel-Skyblock", "Collections")
collections_page = wikia.page("Hypixel-Skyblock", "Collections")
# I might as well scrape html using another more commonly-used lib.
# NOTE(review): this rebinding shadows the ``from wikia import html`` import
# above -- the imported name is unusable after this line.
html = collections_page.html()

# Each skill collection falls under table class=wikitable.
# But each item has a less consistent format.
# Kind of falls under 'tr' elem, but it's not unique to items
# Can also apply to tiers, or counts, or rewards.
# I can pull the data out. But how to organise it? That's the concern.
Example #9
0
def wikia_summary(page_name):
    """Return the Star Wars wikia summary for *page_name*."""
    summary_text = wikia.summary('starwars', page_name)
    return summary_text
Example #10
0
def wiki_a(wiki, context, query):
    """Dispatch a wiki lookup command and return a text reply.

    Parameters
    ----------
    wiki : str
        Sub-wikia name passed through to the ``wikia`` API calls.
    context : str
        Command selector, case-insensitive: "search", "summary", "toc",
        "section", "full", "image" or "url".
    query : str
        Search term / page title; for "section" it is "<number> <title>".

    Returns
    -------
    str
        The lookup result, ``ERROR`` (module-level constant) on failure,
        or a help hint for an unknown context.
    """
    message = ""
    ctx = context.lower()  # normalise once instead of per-branch

    if ctx == "search":
        message = ",  \n".join(wikia.search(wiki, query))
    elif ctx == "summary":
        try:
            message = wikia.summary(wiki, query)
        except Exception:
            message = ERROR
    elif ctx == "toc":
        try:
            sec_list = wikia.page(wiki, query).sections
            # Build a numbered table of contents for the "section" command.
            message = sec_list[0] + " (1)"
            for i, sec_name in enumerate(sec_list[1:]):
                message += ",  \n" + sec_name + " (" + str(i + 2) + ")"
        except Exception:
            message = ERROR
    elif ctx == "section":
        try:
            # NOTE(review): this branch calls the *wikipedia* module while
            # the rest of the function uses *wikia* -- confirm intended.
            sec_list = wikipedia.page(query).sections
            store = query.split(" ", 1)
            sec_num = int(store[0])
            query = store[1]
            message = wikipedia.page(query).section(sec_list[sec_num])
            # Fixed: the original compared against the undefined name NONE,
            # which raised NameError and was silently turned into ERROR.
            if message is None:
                # This will happen often because the API is pretty shit
                message = "Unable to grab the section text."
        except Exception:
            message = ERROR
    elif ctx == "full":
        try:
            message = wikia.page(wiki, query).content
        except Exception:
            message = ERROR
    elif ctx == "image":
        try:
            match = ""
            # NOTE(review): wikipedia vs wikia inconsistency here as well.
            img_list = wikipedia.page(query).images
            # Removing first letter to remove capital because wikipedia
            # is anal about uppercase and lowercase
            for candidate in img_list:
                match = candidate
                if query[1:] in candidate:
                    break
            if match == "":
                message = "Image could not be found."
            else:
                # NOTE(review): ``resp`` is not defined in this scope --
                # presumably a module-level (Twilio-style) response object.
                resp.message().media(match)
        except Exception:
            # Fixed: the original evaluated the bare expression ERROR here
            # without assigning it, leaving message as "".
            message = ERROR
    elif ctx == "url":
        try:
            message = wikia.page(wiki, query).url
        except Exception:
            message = ERROR
    else:
        message = "Invalid context. Type '?' for help."

    return message