Example #1
import re
import string
import sys
import time
import urllib

import praw
import wikipedia

lastload = int(time.time())  # epoch seconds; portable equivalent of time.strftime("%s")
has_list = False
totalposted = 0

while True:
  try:
    # comments = r.get_comments("all", limit=1000)
    # for post in comments:
    for post in praw.helpers.comment_stream(r, str(sys.argv[1]), limit=None, verbosity=0):
      
      ### Dirty timer hack
      now = int(time.time())
      diff = now - lastload
      # Refresh the banned-user list roughly every 15 minutes
      if diff > 899:
        banned_users = banned_users_page.content_md.strip().split()
        bluelog("BANNED USER LIST RENEWED")
        save_changing_variables('scheduled dump')
        lastload = now
      
      if filterpass(post):
        if mod_switch:
          try:
            mod_switch_summon_on = re.search(r'halowikibot moderator switch: summon only: on', post.body.lower())
            mod_switch_summon_off = re.search(r'halowikibot moderator switch: summon only: off', post.body.lower())
            mod_switch_root_on = re.search(r'halowikibot moderator switch: root only: on', post.body.lower())
            mod_switch_root_off = re.search(r'halowikibot moderator switch: root only: off', post.body.lower())

            # Only subreddit moderators may flip the bot's switches
            mods = r.get_moderators(str(post.subreddit))
            is_mod = False
            for mod in mods:
              if mod.name == post.author.name:
                is_mod = True
            # ...
      ### check for subheading in url string, process if present
      if re.search(r"#", article_name) and not summary_call:
        pagename = article_name.split('#')[0]
        if re.search(r'List of', pagename):
          log('IS A LIST')
          continue
        if re.search(r'\)', pagename):
          pagename = process_brackets_links(pagename)
        pagename = urllib.unquote(pagename)
        sectionname = article_name.split('#')[1]
        if re.search(r'\)', sectionname):
          sectionname = sectionname.replace(')', '')
          sectionname = sectionname.replace('\\', '')
        # MediaWiki hex-escapes anchor bytes as ".XX"; turning "." into "%"
        # makes the anchor an ordinary percent-encoded string
        sectionname = sectionname.strip().replace('.', '%')
        sectionname = urllib.unquote(sectionname)
        bluelog("TOPIC: %s" % filter(lambda x: x in string.printable, pagename))
        bluelog("LINKS TO SECTION: %s" % filter(lambda x: x in string.printable, sectionname))
        try:
          page = wikipedia.page(pagename.encode('utf-8', 'ignore'), auto_suggest=False)
          section = page.section(sectionname.encode('utf-8', 'ignore'))
          if section is None or section.encode('utf-8', 'ignore').strip() == "":
            raise Exception("SECTION RETURNED EMPTY")
          sectionname = sectionname.replace('_', ' ')
          link = page.url + "#" + sectionname
          # Escape ")" so the Markdown links do not terminate early
          link = link.replace(')', '\\)')
          page_url = page.url.replace(')', '\\)')
          section = section.replace('\n', '\n\n>')  # blockquote every paragraph
          success("TEXT PACKAGED")
          section = truncate(section, 1500)
          comment = ("*Here's the linked section ["+sectionname+"]("+link+") from Wikipedia article ["+page.title+"]("+page_url+")* : \n\n---\n\n>"+section+"\n\n---\n\n[^(about)](http://) ^| *^(/u/"+post.author.name+" can reply with 'delete'. Will also delete if comment's score is -1 or less.)*  ^| ^[**Summon**](http://www.reddit.com/r/autowikibot/comments/1ux484/ask_wikibot/)")
          post_reply(comment, post)
          # ...
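
The "."-to-"%" rewrite above is how the bot decodes MediaWiki section anchors: MediaWiki hex-escapes anchor bytes as ".XX" (for example "(" becomes ".28"), so swapping "." for "%" yields an ordinary percent-encoded string that urllib.unquote can decode. A minimal sketch of just that trick, with a made-up anchor value; note the hack would mangle a section title containing a literal dot:

import urllib

# hypothetical anchor for the section heading "Halo 2 (video game)"
anchor = "Halo_2_.28video_game.29"
decoded = urllib.unquote(anchor.replace('.', '%'))
print decoded.replace('_', ' ')  # -> Halo 2 (video game)
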
Example #2

import re
import string
import sys
import time
import urllib

import praw
import wikipedia

lastload = int(time.time())
has_list = False
totalposted = 0

while True:
  try:
    # comments = r.get_comments("all", limit=1000)
    # for post in comments:
    for post in praw.helpers.comment_stream(r, str(sys.argv[1]), limit=None, verbosity=0):
      link = find_link(post.body)
      ### Dirty timer hack
      now = int(time.time())
      diff = now - lastload
      # Every 15 minutes, update the banned user list and the OAuth credentials
      if diff > 899:
        banned_users = banned_users_page.content_md.strip().split()
        bluelog("BANNED USER LIST RENEWED")
        save_changing_variables('scheduled dump')
        access_information = r.refresh_access_information(refresh_token)
        bluelog("Updated OAuth access information")
        lastload = now

      if filterpass(post):
        if mod_switch:
          try:
            mod_switch_summon_on = re.search(r'wikiabot moderator switch: summon only: on',post.body.lower())
            mod_switch_summon_off = re.search(r'wikiabot moderator switch: summon only: off',post.body.lower())
            mod_switch_root_on = re.search(r'wikiabot moderator switch: root only: on',post.body.lower())
            mod_switch_root_off = re.search(r'wikiabot moderator switch: root only: off',post.body.lower())

            # Only subreddit moderators may flip the bot's switches
            mods = r.get_moderators(str(post.subreddit))
            is_mod = False
            for mod in mods:
              if mod.name == post.author.name:
                is_mod = True
            # ...
      ### check for subheading in url string, process if present
      if re.search(r"#", article_name) and not summary_call:
        pagename = article_name.split('#')[0]
        if re.search(r'List of', pagename):
          log('IS A LIST')
          continue
        if re.search(r'\)', pagename):
          pagename = process_brackets_links(pagename)
        pagename = urllib.unquote(pagename)
        sectionname = article_name.split('#')[1]
        if re.search(r'\)', sectionname):
          sectionname = sectionname.replace(')', '')
          sectionname = sectionname.replace('\\', '')
        sectionname = sectionname.strip().replace('.', '%')
        sectionname = urllib.unquote(sectionname)
        bluelog("TOPIC: %s" % filter(lambda x: x in string.printable, pagename))
        bluelog("LINKS TO SECTION: %s" % filter(lambda x: x in string.printable, sectionname))
        try:
          page = wikipedia.page(pagename.encode('utf-8', 'ignore'), auto_suggest=False)
          section = page.section(sectionname.encode('utf-8', 'ignore'))
          if section is None or section.encode('utf-8', 'ignore').strip() == "":
            raise Exception("SECTION RETURNED EMPTY")
          sectionname = sectionname.replace('_', ' ')
          link = page.url + "#" + sectionname
          # ...
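
Both examples poll a "dirty timer" instead of running a background scheduler: on every streamed comment the loop compares wall-clock time against the last refresh and performs the periodic maintenance (the banned-list reload, plus an OAuth token refresh in the second example) at most once per interval. A stripped-down sketch of that pattern, where stream, handle and refresh are hypothetical stand-ins for the comment stream, the per-comment work, and the maintenance callback:

import time

def run(stream, handle, refresh, interval=900):
  # Call refresh() at most once per `interval` seconds while consuming stream.
  # Because the check only happens per item, a stalled stream delays refresh.
  lastload = int(time.time())
  for item in stream:
    now = int(time.time())
    if now - lastload >= interval:  # the examples use "diff > 899"
      refresh()
      lastload = now
    handle(item)
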