def unused(inp):
	"""Find the first unused SCP slot (scp-001 .. scp-2998).

	Collects every page tagged "scp" from the cached tag list, then walks
	the numeric range and returns a link for the first number that is
	neither already tagged nor an existing page.
	"""
	api = wikidotapi.connection()
	api.Site = "scp-wiki"
	pages = api.refresh_pages()  # list of page-name strings
	scps = []
	for page in pages:
		try:
			if "scp" in taglist[page]:
				scps.append(page)
		except (KeyError, IndexError):
			pass
	for i in range(1, 2999):
		x = str(i).zfill(3)  # zero-pad to at least three digits (001, 010, 100)
		# BUG FIX: original tested `x in val` -- a substring check against the
		# *last* tagged page seen (and an unbound name if none matched) --
		# instead of checking the collected list of tagged pages.
		if "scp-" + x in scps:
			continue
		if api.page_exists("scp-" + x):
			continue
		return "The first unused page found is SCP-" + x + " - http://www.scp-wiki.net/scp-" + x
def showmore(inp):
	"""Show the next batch of three results from the last .sea search.

	Relies on the shared ``searesults`` list and the ``seaiter`` batch
	counter (stored via __builtin__ so plugins can share them); each call
	advances the counter so the following call shows the next batch.
	"""
	global seaiter
	global searesults
	api = wikidotapi.connection()  # creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages()  # refresh page list provided by the API, is only a list of strings
	final = ""
	minval = seaiter * 3 + 1  # 1-based index of the first result in this batch
	maxval = seaiter * 3 + 3  # 1-based index of the last result in this batch
	__builtin__.seaiter += 1
	val = 0
	for result in searesults:
		val += 1
		if val == minval:
			title = api.get_page_item(result, "title")
			rating = api.get_page_item(result, "rating")
			final += "" + title + "" + "(Rating:" + str(rating) + ")"
		# Simplified: the original tested `val<=maxval and val != minval and
		# val>minval`; `val > minval` already implies `val != minval`.
		if minval < val <= maxval:
			title = api.get_page_item(result, "title")
			rating = api.get_page_item(result, "rating")
			final += ", " + title + "" + "(Rating:" + str(rating) + ")"
	if val > maxval:
		final += ", With " + str(val - maxval) + " more matches."
	if final == "":
		return "There are no more matches to show."
	return final
def unused(inp):
	"""Find the first unused SCP slot (scp-001 .. scp-2998).

	Collects the numeric suffix of every page tagged "scp", then scans the
	range for a number that is neither in that list nor an existing page.
	"""
	api = wikidotapi.connection()  # creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages()  # refresh page list provided by the API, is only a list of strings
	scps = []
	for page in pages:
		try:
			if "scp" in taglist[page] and page.startswith("scp-"):
				# BUG FIX: original took page[3]+page[4]+page[5]+page[6],
				# which kept the leading dash and truncated 4-digit numbers;
				# take the whole suffix after "scp-" instead.
				scps.append(page[4:])
		except (KeyError, IndexError):
			pass
	for i in range(1, 2999):
		x = str(i).zfill(3)  # zero-pad to at least three digits
		# BUG FIX: original tested `x in val` (the last suffix seen) rather
		# than the collected list, so already-taken slots were not skipped.
		if x in scps:
			continue
		if api.page_exists("scp-" + x):
			continue
		return "The first unused page found is SCP-" + x + " - http://www.scp-wiki.net/scp-" + x
# Exemple #4
# 0
def scp(inp):  #this is for WL use, easily adaptable to SCP
    ".scp <Article #> -- Will return exact match of 'SCP-Article#'"
    api = wikidotapi.connection()  #creates API connection
    api.Site = "scp-wiki"
    pages = api.refresh_pages()  #refresh page list provided by the API, is only a list of strings
    line = re.sub("[ ,']", '-', inp)  #removes spaces and apostrophes and replaces them with dashes, per wikidot's standards
    for page in pages:
        if "scp-" + line.lower() == page:  #check for first match to input
            if api.page_exists(page.lower()):  #verification of page existence
                try:  #the item keys will be missing for most pages, so handle KeyError
                    if "scp" in api.get_page_item(page, "tags"):  #check for tag
                        rating = api.get_page_item(page, "rating")
                        if rating < 0:
                            ratesign = "-"
                        if rating >= 0:
                            ratesign = "+"  #adds + or minus sign in front of rating
                        ratestring = "Rating[" + ratesign + str(rating) + "]"
                        author = api.get_page_item(page, "created_by")
                        authorstring = "Written by " + author
                        title = api.get_page_item(page, "title")
                        sepstring = ", "
                        return "nonick::" + title + " (" + ratestring + sepstring + authorstring + ") - http://scp-wiki.net/" + page.lower()  #nonick:: means that the caller's nick isn't prefixed
                except KeyError:
                    pass
            else:
                # BUG FIX: this message was previously the `else` of the
                # try-block, so it fired whenever the page merely lacked the
                # "scp" tag; it belongs to the page-existence check instead.
                return "nonick::Match found but page does not exist, please consult pixeltasim for error."
    return "nonick::Page not found"
def scpregexlowercase(match):
	"""Handle a bare scp-#### (or !scp-####) chat token and describe that page.

	Returns a summary line for a tagged SCP page, an explanation when the
	page exists but is not a tagged scp, a creation link when the page does
	not exist, or None when the token is not an scp reference at all.
	"""
	text = match.string
	if ' ' in text:
		return
	if not (text.startswith("scp-") or text.startswith("!scp-")):
		return
	api = wikidotapi.connection()  # creates API connection
	api.Site = "scp-wiki"
	api.refresh_pages()  # refresh the API's cached page list; value unused here
	page = re.sub("[!]", '', text.lower())
	if not api.page_exists(page):  # only api call needed to verify existence
		return "Page does not exist, but you can create it here: " + "http://scp-wiki.net/" + page
	if "scp" not in api.get_page_item(page, "tags"):
		return "Page exists but is either untagged or not an scp."
	rating = api.get_page_item(page, "rating")
	ratesign = "+" if rating >= 0 else "-"  # sign prefix for the rating
	ratestring = "Rating:" + ratesign + str(rating) + ""
	author = api.get_page_item(page, "created_by")
	if author is None:
		author = "unknown"
	authorstring = "Written by " + author
	title = api.get_page_item(page, "title")
	sepstring = ", "
	return "" + title + " (" + ratestring + sepstring + authorstring + ") - http://scp-wiki.net/" + page.lower()
def unused(inp):
	"""Find the first unused SCP slot (scp-001 .. scp-2998).

	Collects every page tagged "scp", then walks the numeric range and
	returns a link for the first number whose page is neither tagged nor
	existing.
	"""
	api = wikidotapi.connection()
	api.Site = "scp-wiki"
	pages = api.refresh_pages()  # list of page-name strings
	scps = []
	for page in pages:
		try:
			if "scp" in taglist[page]:
				scps.append(page)
		except (KeyError, IndexError):
			pass
	for i in range(1, 2999):
		x = str(i).zfill(3)  # zero-pad to at least three digits
		# BUG FIX: original tested `x in val` -- a substring check against
		# the last tagged page seen (unbound if none matched) -- instead of
		# checking the collected list.
		if "scp-" + x in scps:
			continue
		if api.page_exists("scp-" + x):
			continue
		return "The first unused page found is SCP-" + x + " - http://www.scp-wiki.net/scp-" + x
def scp(inp): #this is for WL use, easily adaptable to SCP
	".scp <Article #> -- Will return exact match of 'SCP-Article#'"
	api = wikidotapi.connection() #creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	line = re.sub("[ ,']",'-',inp) #removes spaces and apostrophes and replaces them with dashes, per wikidot's standards
	for page in pages:
		if "scp-"+line.lower() == page: #check for first match to input
			if api.page_exists(page.lower()): #verification of page existence
				try: #the item keys will be missing for most pages, so handle KeyError
					if "scp" in api.get_page_item(page,"tags"): #check for tag
						rating = api.get_page_item(page,"rating")
						if rating < 0:
							ratesign = "-"
						if rating >= 0:
							ratesign = "+" #adds + or minus sign in front of rating
						ratestring = "Rating["+ratesign+str(rating)+"]"
						author = api.get_page_item(page,"created_by")
						authorstring = "Written by "+author
						title = api.get_page_item(page,"title")
						sepstring = ", "
						return "nonick::"+title+" ("+ratestring+sepstring+authorstring+") - http://scp-wiki.net/"+page.lower() #nonick:: means that the caller's nick isn't prefixed
				except KeyError:
					pass
			else:
				# BUG FIX: this message was previously the `else` of the
				# try-block, so it fired whenever the page merely lacked the
				# "scp" tag; it belongs to the page-existence check instead.
				return "nonick::Match found but page does not exist, please consult pixeltasim for error."
	return "nonick::Page not found"
		
def tale(inp): #this is for WL use, easily adaptable to SCP
	".tale <Article Name> -- Will return first page containing exact match to Article Name"
	# Matches the dash-normalised input as a substring of page *names*;
	# returns on the first page that matches, whatever its tag state.
	api = wikidotapi.connection() #creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	line = re.sub("[ ,']",'-',inp) #removes spaces and apostrophes and replaces them with dashes, per wikidot's standards
	for page in pages: 
		if line.lower() in page.lower(): #check for first match to input
			if api.page_exists(page.lower()): #only api call in .tale, verification of page existence
				if "tale" in api.get_page_item(page,"tags"): #check for tag
					rating = api.get_page_item(page,"rating")
					if rating < 0:
						ratesign = "-"
					if rating >= 0:
						ratesign = "+" #adds + or minus sign in front of rating
					ratestring = "Rating:"+ratesign+str(rating)+"" 
					author = api.get_page_item(page,"created_by")
					authorstring = "Written by "+author
					title = api.get_page_item(page,"title")
					sepstring = ", "
					return ""+title+" ("+ratestring+sepstring+authorstring+") - http://scp-wiki.net/"+page.lower() #returns the string, nonick:: means that the caller's nick isn't prefixed
				else:
					# Python 2 print statement: log the non-tale match for debugging
					print page
					return "Page was found but it is either untagged or an administrative page."
			else:
				return "Match found but page does not exist, please consult pixeltasim for error."
	return "Page not found"
		
def cache_refresh():
	"""Populate the shared page-metadata cache, one get_meta call per page.

	NOTE(review): this version *appends* to the existing ``pagecache``
	rather than replacing it, so calling it repeatedly accumulates
	duplicate entries -- confirm whether that is intended.
	"""
	global pagecache
	api = wikidotapi.connection()
	pages = api.refresh_pages()
	print "Refreshing cache"
	for page in pages:
		# throttle: one API call every half second
		pagecache.append(api.server.pages.get_meta({"site": api.Site, "pages": [page]}))
		time.sleep(0.5)
def lastcreated(inp):
	"""Return a summary line for the three newest pages on the wiki."""
	api = wikidotapi.connection()  # creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	segments = []
	for idx in (-1, -2, -3):  # newest page first
		name = pages[idx]
		title = api.get_page_item(name, "title")
		rating = api.get_page_item(name, "rating")
		segments.append("" + title + "(Rating:" + str(rating) + ") - http://www.scp-wiki.net/" + name)
	return " - ".join(segments)
def wandererslibrary(inp): #this is for WL use, easily adaptable to SCP
	".tale <Article Name> -- Will return first page containing exact match to Article Name"
	# Two passes over (pages x pagecache):
	#   pass 1 matches the dash-normalised input against page *names*;
	#   pass 2 matches the raw input against cached page *titles*.
	api = wikidotapi.connection() #creates API connection
	api.Site = "wanderers-library"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	line = re.sub("[ ,']",'-',inp) #removes spaces and apostrophes and replaces them with dashes, per wikidot's standards
	for page in pages: 
		for item in pagecache: #iterates through ever attribute in the pagecache, similar to .author
			if line.lower() in page: #check for first match to input
				if api.page_exists(page.lower()): #only api call in .tale, verification of page existence
					try: #must do error handling as the key will be wrong for most of the items
						if "entry" in item[page]["tags"]: #check for tag
							rating = item[page]["rating"] 
							if rating < 0:
								ratesign = "-"
							if rating >= 0:
								ratesign = "+" #adds + or minus sign in front of rating
							ratestring = "Rating:"+ratesign+str(rating)+"" 
							author = item[page]["created_by"]
							authorstring = "Written by "+author
							title = item[page]["title"]
							sepstring = ", "
							return "nonick::"+title+" ("+ratestring+sepstring+authorstring+") - http://wanderers-library.wikidot.com/"+page.lower() #returns the string, nonick:: means that the caller's nick isn't prefixed
					except KeyError:
						pass 
				else:
					return "nonick::Match found but page does not exist, please consult pixeltasim for error."
	for page in pages: 
		for item in pagecache: #iterates through ever attribute in the pagecache, similar to .author
			try:
				if inp.lower() in item[page]["title"].lower(): #check for first match to input
					print item[page]["title"].lower()
					if api.page_exists(page.lower()): #only api call in .tale, verification of page existence
						#must do error handling as the key will be wrong for most of the items
							# NOTE(review): this block is indented one level deeper than its
							# parent `if`; Python accepts it, but it looks like a leftover
							# from a removed try-statement -- confirm before reformatting.
							if "entry" in item[page]["tags"]: #check for tag
								rating = item[page]["rating"] 
								if rating < 0:
									ratesign = "-"
								if rating >= 0:
									ratesign = "+" #adds + or minus sign in front of rating
								ratestring = "Rating:"+ratesign+str(rating)+"" 
								author = item[page]["created_by"]
								authorstring = "Written by "+author
								title = item[page]["title"]
								sepstring = ", "
								return "nonick::"+title+" ("+ratestring+sepstring+authorstring+") - http://wanderers-library.wikidot.com/"+page.lower() #returns the string, nonick:: means that the caller's nick isn't prefixed
							else:
								return "nonick::Page was found but it is either untagged or an administrative page."
					else:
						return "nonick::Match found but page does not exist, please consult pixeltasim for error."
			except KeyError:
				pass 
	return "nonick::Page not found"
		


	
def author(inp):
	".author <Author Name> -- Will return details regarding the author"
	if firstrefresh == 0:#make sure the cache actually exists
		return "Cache has not yet updated, please wait a minute and search again."
	api = wikidotapi.connection()
	api.Site = "wanderers-library"
	pages = api.refresh_pages()
	authpages = []
	totalrating = 0
	pagetotal = 0
	pagerating = 0
	author = "None"
	multimatch = []
	authorpage = ""
	for page in pages:
		for item in pagecache: #these two for loops iterate through every item within each page dictionary, the proper syntax for accessing a specific item is item[page][itemname],
			try: 
				if "entry" in item[page]["tags"]: #makes sure only articles are counted
					if author == item[page]["created_by"]:
						authpages.append(page)
						pagetitle = item[page]["title"]
						pagerating = item[page]["rating"]
						totalrating = totalrating + pagerating
						print page
						pagetotal = pagetotal + 1 
					if inp.lower() in item[page]["created_by"].lower() and author == "None": #this just matches the author with the first author match
						author = item[page]["created_by"]
						authpages.append(page)
						pagetitle = item[page]["title"]
						pagerating = item[page]["rating"]
						totalrating = totalrating + pagerating
						print page
						pagetotal = pagetotal + 1 #all lines above provide page data, math is pretty easy and self-explanatory
				else:
					if "author" in item[page]["tags"]:
						if author == item[page]["created_by"]:
							authorpage = "http://wanderers-library.wikidot.com/"+item[page]["fullname"] +" - "
			except KeyError: #must do error handling for code to be valid, iterates through incorrect keys multiple times, do not print things in the except clause, slows down program immensely 
				pass
	for page in pages: #this loop checks to see if multiple authors match input 
		for item in pagecache:
			try:
				if "entry" in item[page]["tags"]:
					if inp.lower() in item[page]["created_by"].lower():
						multimatch.append(item[page]["created_by"])
			except KeyError:
				pass
	for authors in multimatch: #checks to see if multiple authors found 
		if authors != author:
			return "There are "+ str(len(multimatch)) + " authors matching you query. Please be more specifc. " 
	avgrating = 0
	if pagetotal is not 0: #just so no division by zero
		avgrating = totalrating/pagetotal
	if not authpages: #if no author pages are added 
		return "Author not found."
	return "nonick::"+ authorpage+""+author +" has written " + str(pagetotal) + " pages. They have " + str(totalrating)+ " net upvotes with an average rating of " + str(avgrating) + ". Their most recent article is " + pagetitle + "(Rating:" + str(pagerating) + ")"#+"- http://wanderers-library.wikidot.com/" + authpages[-1].lower()
def updatebans(inp, conn= None,chan = None):
	"""Manually rebuild the shared ban cache from the 05command ban page.

	Only acts when triggered from the #site67 channel.  Parses the wikidot
	table (cells separated by ``||``) below the first ``-----`` divider into
	``__builtin__.bancache`` keyed by nick, then reports success or failure
	back to the channel via ``conn.msg``.
	"""
	if chan == "#site67":
		try:
			__builtin__.alertops = 0
			api = wikidotapi.connection()
			#overwrite update
			localbancache = {}
			__builtin__.bancache = {}
			api.Site = "05command"
			pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
			source = api.server.pages.get_one({"site":api.Site,"page":"alexandra-s-ban-page"})
			content = source["content"]
			fs = content.split("-----")
			banlist = fs[1]
			invbans = banlist.split("\n")
			for ban in invbans:
				# table columns: 2=nick(s), 3=IP, 4=unban date, 5=reason
				# (column 1 is the leading table markup before the first ||)
				parts = ban.split("||")
				val = 0
				_list = []
				nick = ""
				author = ""
				for part in parts:
					val+=1
					if val ==2:
						#Nick
						nick = part 
						_list.append(nick)
					if val ==3:
						#IP
						_list.append(part)
					if val ==4:
						#unban date
						if part != "Ban Status":
							if part != "Perma":
								# dates in m/d/Y; past dates are rewritten as "Unbanned"
								date = datetime.datetime.strptime(part,"%m/%d/%Y")
								today =datetime.datetime.today()
								if date.date() <= today.date():
									part = "Unbanned"
								_list.append(part)
							else:
								_list.append(part)
					if val ==5:
						#Reason
						_list.append(part)
				if nick != "Nick(s)": # skip the header row of the table
					localbancache[nick] = _list
			__builtin__.bancache = localbancache
			print "Ban update complete."
			__builtin__.hugs = 0
			ts = time.time()
			__builtin__.lastbanrefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
			conn.msg(chan, "Ban List Updated")
		except Exception as e:
			conn.msg(chan, "Ban List Update Failed, please check ban list for errors. Error is: "+e.message)
def ban_refresh():
    """Rebuild the shared ban cache from the 05command ban page.

    Re-arms itself via ``threading.Timer`` every 900 seconds, so the first
    call starts a perpetual refresh cycle.  Parses the wikidot table
    (cells separated by ``||``) below the first ``-----`` divider into
    ``__builtin__.bancache`` keyed by nick.
    """
    threading.Timer(900, ban_refresh).start()
    api = wikidotapi.connection()
    #overwrite update
    localbancache = {}
    __builtin__.bancache = {}
    api.Site = "05command"
    pages = api.refresh_pages(
    )  #refresh page list provided by the API, is only a list of strings
    source = api.server.pages.get_one({
        "site": api.Site,
        "page": "alexandra-s-ban-page"
    })
    content = source["content"]
    fs = content.split("-----")
    banlist = fs[1]
    invbans = banlist.split("\n")
    for ban in invbans:
        # table columns: 2=nick(s), 3=IP, 4=unban date, 5=reason
        parts = ban.split("||")
        val = 0
        _list = []
        nick = ""
        author = ""
        for part in parts:
            val += 1
            if val == 2:
                #Nick
                nick = part
                _list.append(nick)
            if val == 3:
                #IP
                _list.append(part)
            if val == 4:
                #unban date
                if part != "Ban Status":
                    if part != "Perma":
                        # dates in m/d/Y; past dates rewritten as "Unbanned"
                        date = datetime.datetime.strptime(part, "%m/%d/%Y")
                        today = datetime.datetime.today()
                        if date.date() <= today.date():
                            part = "Unbanned"
                        _list.append(part)
                    else:
                        _list.append(part)
            if val == 5:
                #Reason
                _list.append(part)
        if nick != "Nick(s)":  # skip the header row of the table
            localbancache[nick] = _list
    __builtin__.bancache = localbancache
    print "Ban update complete."
    __builtin__.hugs = 0
    ts = time.time()
    __builtin__.lastbanrefresh = datetime.datetime.fromtimestamp(ts).strftime(
        '%Y-%m-%d %H:%M:%S')
def cache_refresh():
    """Populate the shared page-metadata cache, one get_meta call per page.

    NOTE(review): this version *appends* to the existing ``pagecache``
    rather than replacing it, so repeated calls accumulate duplicates --
    confirm whether that is intended.
    """
    global pagecache
    api = wikidotapi.connection()
    pages = api.refresh_pages()
    print "Refreshing cache"
    for page in pages:
        # throttle: one API call every half second
        pagecache.append(
            api.server.pages.get_meta({
                "site": api.Site,
                "pages": [page]
            }))
        time.sleep(0.5)
def ban_refresh():
	"""Rebuild the shared ban cache from the 05command ban page.

	Re-arms itself via ``threading.Timer`` every 900 seconds, so the first
	call starts a perpetual refresh cycle.  Parses the wikidot table
	(cells separated by ``||``) below the first ``-----`` divider into
	``__builtin__.bancache`` keyed by nick.
	"""
	threading.Timer(900, ban_refresh).start (); 
	api = wikidotapi.connection()
	#overwrite update
	localbancache = {}
	__builtin__.bancache = {}
	api.Site = "05command"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	source = api.server.pages.get_one({"site":api.Site,"page":"alexandra-s-ban-page"})
	content = source["content"]
	fs = content.split("-----")
	banlist = fs[1]
	invbans = banlist.split("\n")
	for ban in invbans:
		# table columns: 2=nick(s), 3=IP, 4=unban date, 5=reason
		parts = ban.split("||")
		val = 0
		_list = []
		nick = ""
		author = ""
		for part in parts:
			val+=1
			if val ==2:
				#Nick
				nick = part 
				_list.append(nick)
			if val ==3:
				#IP
				_list.append(part)
			if val ==4:
				#unban date
				if part != "Ban Status":
					if part != "Perma":
						# dates in m/d/Y; past dates rewritten as "Unbanned"
						date = datetime.datetime.strptime(part,"%m/%d/%Y")
						today =datetime.datetime.today()
						if date.date() <= today.date():
							part = "Unbanned"
						_list.append(part)
					else:
						_list.append(part)
			if val ==5:
				#Reason
				_list.append(part)
		if nick != "Nick(s)": # skip the header row of the table
			localbancache[nick] = _list
	__builtin__.bancache = localbancache
	print "Ban update complete."
	__builtin__.hugs = 0
	ts = time.time()
	__builtin__.lastbanrefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
	

	
def cache_refresh(): #calls itself automatically once called for the first time
	api = wikidotapi.connection()
	pages = api.refresh_pages()
	print "Refreshing cache"
	newpagecache = [] #the newpagecache is so that while it is updating you can still use the old one
	for page in pages:
		newpagecache.append(api.server.pages.get_meta({"site": api.Site, "pages": [page]}))
		time.sleep(0.4) #this keeps the api calls within an acceptable threshold
	print "Cache refreshed!"
	__builtin__.pagecache= newpagecache #__builtin__ means that pagecache is global and can be used by plugins
	ts = time.time()
	__builtin__.lastcacherefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
	time.sleep(3600) #one hour 
	cache_refresh() #calls itself again
def untagged(inp):
	"""List the titles of pages whose tag entry is the empty string."""
	api = wikidotapi.connection()  # creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages()  # list of page-name strings
	report = "The following pages are untagged: "
	found_any = False
	for candidate in pages:
		try:
			if taglist[candidate] == "":
				found_any = True
				report += titlelist[candidate] + "."
		except KeyError:
			# pages missing from either lookup table are simply skipped
			pass
	if found_any:
		return report
	return "No untagged pages found!"
def lastcreated(inp):
    """Return a summary line for the three newest pages on the wiki."""
    api = wikidotapi.connection()  # creates API connection
    api.Site = "scp-wiki"
    pages = api.refresh_pages()
    segments = []
    for idx in (-1, -2, -3):  # newest page first
        name = pages[idx]
        title = api.get_page_item(name, "title")
        rating = api.get_page_item(name, "rating")
        segments.append("" + title + "(Rating:" + str(rating) +
                        ") - http://www.scp-wiki.net/" + name)
    return " - ".join(segments)
def sea(inp): #this is for WL use, easily adaptable to SCP
	".sea <Article Name> -- Will return first three pages containing exact matches to Article Name, with number of other matches"
	# Collects matches by page *name* and by page *title*, formats the first
	# three, and stashes the full result list in __builtin__ for .showmore.
	# NOTE(review): a page matching both by name and by title is appended
	# twice -- confirm whether duplicates are acceptable here.
	api = wikidotapi.connection() #creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	line = re.sub("[ ,']",'-',inp) #removes spaces and apostrophes and replaces them with dashes, per wikidot's standards
	results = []
	for page in titlelist: 
		if line.lower() in page.lower(): #check for first match to input
			if api.page_exists(page.lower()): #only api call in .tale, verification of page existence
				if "tale" in api.get_page_item(page,"tags") or "scp" in api.get_page_item(page,"tags"): #check for tag
					results.append(page)
					continue 
			else:
				return "Match found but page does not exist, please consult pixeltasim for error."
		if inp.lower() in titlelist[page].lower(): # also match against the page title
			if api.page_exists(page.lower()): #only api call in .tale, verification of page existence
				if "tale" in api.get_page_item(page,"tags") or "scp" in api.get_page_item(page,"tags"): #check for tag
					results.append(page)
	if results == []:
		return "No matches found."
	final = ""
	third = 0
	for result in results:
		third+=1
		if third == 1:
			title = api.get_page_item(result,"title")
			rating = api.get_page_item(result,"rating")
			final+= ""+title+""+"(Rating:"+str(rating)+")"
		if third<=3 and third != 1: # results 2 and 3 get a comma separator
			title = api.get_page_item(result,"title")
			rating = api.get_page_item(result,"rating")
			final+= ", "+title+""+"(Rating:"+str(rating)+")"
	if third>3:
		final += ", With " + str(third-3) + " more matches."
	if third==1: # single match: include a direct link
		page = results[0]
		title = titlelist[page]
		rating = api.get_page_item(page,"rating")
		final = ""+title+""+"(Rating:"+str(rating)+") - http://www.scp-wiki.net/"+page
	__builtin__.seaiter = 1 # .showmore starts from the second batch
	__builtin__.searesults = results
	return final
def linkregex(inp):
	"""Describe any scp-wiki URL found in a chat message.

	``inp`` is a regex match object; each whitespace-separated token is
	checked for the wiki URL prefix.  Returns None when no token matches
	an existing page.
	"""
	api = wikidotapi.connection() #creates API connection
	api.Site = "scp-wiki"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	substrings = inp.string.split()
	for ss in substrings:
		if "http://www.scp-wiki.net/" in ss:
			page = ss[24:] # strip the 24-character URL prefix, leaving the page name
			if api.page_exists(page): #verification of page existence
				rating = api.get_page_item(page,"rating")
				if rating < 0:
					ratesign = "-"
				if rating >= 0:
					ratesign = "+" #adds + or minus sign in front of rating
				ratestring = "Rating:"+ratesign+str(rating)+"" 
				author = api.get_page_item(page,"created_by")
				authorstring = "Written by "+author
				title = api.get_page_item(page,"title")
				sepstring = ", "
				return ""+title+" ("+ratestring+sepstring+authorstring+") - http://scp-wiki.net/"+page.lower() #returns the string, nonick:: means that the caller's nick isn't prefixed
# Exemple #22
# 0
def cache_refresh(
):  #calls itself automatically once called for the first time
    api = wikidotapi.connection()
    pages = api.refresh_pages()
    print "Refreshing cache"
    newpagecache = [
    ]  #the newpagecache is so that while it is updating you can still use the old one
    for page in pages:
        newpagecache.append(
            api.server.pages.get_meta({
                "site": api.Site,
                "pages": [page]
            }))
        time.sleep(
            0.4)  #this keeps the api calls within an acceptable threshold
    print "Cache refreshed!"
    __builtin__.pagecache = newpagecache  #__builtin__ means that pagecache is global and can be used by plugins
    ts = time.time()
    __builtin__.lastcacherefresh = datetime.datetime.fromtimestamp(
        ts).strftime('%Y-%m-%d %H:%M:%S')
    time.sleep(3600)  #one hour
    cache_refresh()  #calls itself again
def scp(inp): #this is for WL use, easily adaptable to SCP
	".scp <Article #> -- Will return exact match of 'SCP-Article#'"
	api = wikidotapi.connection()  # creates API connection
	api.Site = "scp-wiki"
	page_names = api.refresh_pages()  # refresh the API's cached page list
	dashed = re.sub("[ ,']", '-', inp)  # normalised input; kept for parity though unused below
	page = "scp-" + inp.lower()
	if not api.page_exists(page):  # only api call needed to verify existence
		return "Page does not exist, but you can create it here: " + "http://scp-wiki.net/" + page
	if "scp" not in api.get_page_item(page, "tags"):
		return "Page exists but is either untagged or not an scp."
	rating = api.get_page_item(page, "rating")
	ratesign = "+" if rating >= 0 else "-"  # sign prefix for the rating
	ratestring = "Rating:" + ratesign + str(rating) + ""
	author = api.get_page_item(page, "created_by")
	authorstring = "Written by " + author
	title = api.get_page_item(page, "title")
	sepstring = ", "
	return "" + title + " (" + ratestring + sepstring + authorstring + ") - http://scp-wiki.net/" + page.lower()
def untagged(inp):
	"""List page names lacking tags, skipping administrative namespaces."""
	api = wikidotapi.connection()
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	# namespaces that are expected to be untagged and therefore ignored
	admin_prefixes = ("forum:", "system", "nav", "css", "admin", "component", "search")
	final = "The following pages are untagged: "
	found = False
	for page in pages:
		try:
			tags = taglist[page]
		except KeyError:
			# page missing from the tag table entirely
			found = True
			final += page + " - "
			continue
		if tags:
			continue
		if page.startswith(admin_prefixes):
			continue
		found = True
		final += " - " + page
	if not found:
		return "No untagged pages found!"
	return final
def untagged(inp):
	"""Report pages that have no tags (administrative pages excluded)."""
	api = wikidotapi.connection()
	api.Site = "scp-wiki"
	names = api.refresh_pages()
	report = "The following pages are untagged: "
	none_found = True
	for name in names:
		try:
			if taglist[name]:
				continue
			is_admin = (name.startswith("forum:") or name.startswith("system")
				or name.startswith("nav") or name.startswith("css")
				or name.startswith("admin") or name.startswith("component")
				or name.startswith("search"))
			if not is_admin:
				none_found = False
				report += " - " + name
		except KeyError:
			# page missing from the tag table entirely
			none_found = False
			report += name + " - "
	if none_found:
		report = "No untagged pages found!"
	return report
# Exemple #26
# 0
def tale(inp):  #this is for WL use, easily adaptable to SCP
    ".tale <Article Name> -- Will return first page containing exact match to Article Name"
    # Two passes over (pages x pagecache):
    #   pass 1 matches the dash-normalised input against page *names*;
    #   pass 2 matches the raw input against cached page *titles*.
    if firstrefresh == 0:  #make sure the cache actually exists
        return "Cache has not yet updated, please wait a minute and search again."
    api = wikidotapi.connection()  #creates API connection
    api.Site = "wanderers-library"
    pages = api.refresh_pages(
    )  #refresh page list provided by the API, is only a list of strings
    line = re.sub(
        "[ ,']", '-', inp
    )  #removes spaces and apostrophes and replaces them with dashes, per wikidot's standards
    for page in pages:
        for item in pagecache:  #iterates through ever attribute in the pagecache, similar to .author
            if line.lower() in page:  #check for first match to input
                if api.page_exists(page.lower(
                )):  #only api call in .tale, verification of page existence
                    try:  #must do error handling as the key will be wrong for most of the items
                        if "entry" in item[page]["tags"]:  #check for tag
                            rating = item[page]["rating"]
                            if rating < 0:
                                ratesign = "-"
                            if rating >= 0:
                                ratesign = "+"  #adds + or minus sign in front of rating
                            ratestring = "Rating[" + ratesign + str(
                                rating) + "]"
                            author = item[page]["created_by"]
                            authorstring = "Written by " + author
                            title = item[page]["title"]
                            sepstring = ", "
                            return "nonick::" + title + " (" + ratestring + sepstring + authorstring + ") - http://wanderers-library.wikidot.com/" + page.lower(
                            )  #returns the string, nonick:: means that the caller's nick isn't prefixed
                    except KeyError:
                        pass
                else:
                    return "nonick::Match found but page does not exist, please consult pixeltasim for error."
    for page in pages:
        for item in pagecache:  #iterates through ever attribute in the pagecache, similar to .author
            try:
                if inp.lower() in item[page]["title"].lower(
                ):  #check for first match to input
                    print item[page]["title"].lower()
                    if api.page_exists(
                            page.lower()
                    ):  #only api call in .tale, verification of page existence
                        #must do error handling as the key will be wrong for most of the items
                        if "entry" in item[page]["tags"]:  #check for tag
                            rating = item[page]["rating"]
                            if rating < 0:
                                ratesign = "-"
                            if rating >= 0:
                                ratesign = "+"  #adds + or minus sign in front of rating
                            ratestring = "Rating[" + ratesign + str(
                                rating) + "]"
                            author = item[page]["created_by"]
                            authorstring = "Written by " + author
                            title = item[page]["title"]
                            sepstring = ", "
                            return "nonick::" + title + " (" + ratestring + sepstring + authorstring + ") - http://wanderers-library.wikidot.com/" + page.lower(
                            )  #returns the string, nonick:: means that the caller's nick isn't prefixed
                        else:
                            return "nonick::Page was found but it is either untagged or an administrative page."
                    else:
                        return "nonick::Match found but page does not exist, please consult pixeltasim for error."
            except KeyError:
                pass
    return "nonick::Page not found"
def ban(inp):
	"""Stub/test command.

	Opens a wikidot API connection against scp-wiki, fetches the page list
	and the source of scp-2900 (both results are discarded), then returns a
	fixed notice string.
	"""
	connection = wikidotapi.connection()
	connection.Site = "scp-wiki"
	page_list = connection.refresh_pages()
	page_source = connection.server.pages.get_one({"site": connection.Site, "page": "scp-2900"})
	return "This function is for testing purposes."
# Exemple #28
def authordetails(inp, nick=None):
    """Create/update an author detail page on alexandra-scp for author *inp*.

    Scans the cached SCP-wiki data (globals: scppages, taglist, authorlist,
    titlelist, ratinglist) for articles credited to the author, tallies
    per-category counts and ratings, then writes a summary page through the
    wikidot API.  Returns the created-page URL, a "Did you mean ..."
    disambiguation string when several authors match, or "Author not found.".

    Fixes vs. previous revision: integer comparison no longer uses the
    identity operator (`is not 0`), and the average-SCP-rating division is
    guarded against authors with zero SCP articles.
    """
    authpages = []       # pages credited to the matched author
    totalrating = 0      # net upvotes across all matched pages
    taletotal = 0
    scptotal = 0
    goitotal = 0
    pagerating = 0
    author = inp
    multimatch = []      # every author name that matched the query
    authorpage = ""
    found = 0
    exact = 0            # count of case-insensitive exact name matches
    rewrite = 0
    orgauth = ""
    newauth = ""
    rewriteauthor = 0
    pagetitle = ""
    scprating = 0
    try:
        for page in scppages:
            if "scp" in taglist[page] or "tale" in taglist[
                    page] or "goi-format" in taglist[
                        page]:  #makes sure only articles are counted
                # author strings of the form "orig:rewrite:new" credit a rewriter
                if ":rewrite:" in authorlist[page]:
                    bothauths = authorlist[page].split(":rewrite:")
                    orgauth = bothauths[0]
                    newauth = bothauths[1]
                    if author == newauth:
                        rewriteauthor = 1
                if author == authorlist[page] or rewriteauthor == 1:
                    found = 1
                    rewriteauthor = 0

                    authpages.append(page)
                    pagetitle = titlelist[page]
                    pagerating = ratinglist[page]
                    totalrating = totalrating + pagerating
                    if "scp" in taglist[page]:
                        scptotal += 1
                        scprating += pagerating
                    if "tale" in taglist[page]:
                        taletotal += 1
                    if "goi-format" in taglist[page]:
                        goitotal += 1
                try:
                    if inp.lower() in authorlist[page].lower(
                    ):  #this just matches the author with the first author match
                        if inp.lower() == authorlist[page].lower():
                            exact += 1
                        if exact == 1:
                            # first exact match wins: restart the tallies on it
                            author = authorlist[page]
                            authpages = []
                            multimatch = [authorlist[page]]
                        elif exact < 1:
                            # substring-only match: remember for disambiguation
                            multimatch.append(authorlist[page])
                        if inp.lower() in authorlist[page].lower(
                        ) and found == 0:
                            author = authorlist[page]
                            if ":rewrite:" in authorlist[page]:
                                bothauths = authorlist[page].split(":rewrite:")
                                orgauth = bothauths[0]
                                newauth = bothauths[1]
                                if inp.lower() in orgauth.lower():
                                    author = orgauth
                                if inp.lower() in newauth.lower():
                                    author = newauth
                                rewrite = 1
                            found = 1
                            authpages.append(page)
                            pagetitle = titlelist[page]
                            pagerating = ratinglist[page]
                            totalrating = totalrating + pagerating
                            if "scp" in taglist[page]:
                                scptotal += 1
                                scprating += pagerating
                            if "tale" in taglist[page]:
                                taletotal += 1
                            if "goi-format" in taglist[page]:
                                goitotal += 1
                except AttributeError:
                    pass
            else:
                # non-article pages: pick up the author's own author-page link
                if "author" in taglist[page]:
                    if ":rewrite:" in authorlist[page]:
                        bothauths = authorlist[page].split(":rewrite:")
                        orgauth = bothauths[0]
                        newauth = bothauths[1]
                        if newauth == author:
                            authorpage = "http://scp-wiki.net/" + page + " - "
                    if author == authorlist[page]:
                        authorpage = "http://scp-wiki.net/" + page + " - "
    except KeyError:
        pass
    # dedupe the match list to decide whether disambiguation is needed
    plusauth = []
    moreauthors = 1
    plusauth.append(author)
    for authors in multimatch:  #checks to see if multiple authors found
        z = 0
        if ":rewrite:" in authors:
            continue
        for foundauthor in plusauth:
            if foundauthor == authors:
                z = 1
        if authors != author:
            if z == 0:
                moreauthors += 1
                plusauth.append(authors)
    if moreauthors > 1:
        x = 0
        final = "Did you mean "
        for auth in plusauth:
            x += 1
            if x == 1:
                final += auth + ""
            if x == 2 and moreauthors == 2:
                final += " or " + auth + "?"
            if x == 2 and moreauthors > 2:
                final += ", " + auth + ""
            if x == 3 and moreauthors == 3:
                final += ", or " + auth + "?"
            if x == 3 and moreauthors > 3:
                final += ", or " + auth + "? With " + str(
                    moreauthors) + " more authors matching your query."
        return final
    avgrating = 0
    if taletotal + scptotal + goitotal != 0:  #just so no division by zero
        avgrating = totalrating / (taletotal + scptotal + goitotal)
    if not authpages:  #if no author pages are added
        return "Author not found."
    api = wikidotapi.connection()
    api.Site = "alexandra-scp"
    table = ""
    for page in authpages:
        table += "||**" + titlelist[page] + "**||Rating: " + str(
            ratinglist[page]) + "||Tags: " + str(
                taglist[page]) + "|| http://scp-wiki.net/" + page + "||\n"
    # guard: authors with no SCP articles would otherwise divide by zero
    avgscp = scprating / scptotal if scptotal else 0
    api.set_page_item(author.replace(" ", "-").lower(),
                      "content",
                      "**Pages Authored:" + str(len(authpages)) +
                      "**\n \n**Number of SCPs Written:** " + str(scptotal) +
                      "\n**Number of Tales Written:** " + str(taletotal) +
                      "\n**Number of GOI Formats Written:** " + str(goitotal) +
                      "\n \n**Net Upvotes Received:** " + str(totalrating) +
                      "\n**Average Rating per Page:** " + str(avgrating) +
                      "\n**Average SCP Rating:** " +
                      str(avgscp) +
                      "\n++ Articles\n||Title||Rating||Tags||Link||\n" + table,
                      create=True)
    return "Author detail page created http://alexandra-scp.wikidot.com/" + author.replace(
        " ", "-").lower()
def test(inp, input=None, chan=None, nick=None):
    #check bans
    try:
        with open("bans.bans", "r+b") as f:
            data = f.readlines()
            val = 0
            for line in data:
                parts = line.split()
                host = parts[0]
                mtime = parts[1]
                if (datetime.datetime.now() -
                        mtime) > datetime.timedelta(minutes=1):
                    data.pop(val)
                    baninput.unban(host)
                    print "Unbanning " + host
                val += 1
    except EOFError:
        pass
    api = wikidotapi.connection()  #creates API connection
    #ban update
    localbandict = {}
    try:
        with open("ban.cache", "rb") as f:
            localbanddict = pickle.load(f)
    except EOFError:
        pass
    __builtin__.bandict = localbandict
    localbandict = {}
    api.Site = "05command"
    pages = api.refresh_pages(
    )  #refresh page list provided by the API, is only a list of strings
    source = api.server.pages.get_one({
        "site": api.Site,
        "page": "alexandra-s-ban-page"
    })
    content = source["content"]
    fs = content.split("-----")
    banlist = fs[1]
    invbans = banlist.split("\n")
    for ban in invbans:
        parts = ban.split("||")
        val = 0
        banlist = []
        nick = ""
        for part in parts:
            val += 1
            if val == 2:
                #nicks
                nick = part
                banlist.append(part)
            if val == 3:
                #IPs
                banlist.append(part)
            if val == 4:
                #status
                if nick != "Nick":
                    if part != "Perma":
                        mtime = datetime.datetime.strptime(part, "%m/%d/%Y")
                        if datetime.datetime.today() >= mtime:
                            print datetime.datetime.today()
                            print mtime
                            banlist.append("Unbanned")
                        else:
                            banlist.append(part)
                    else:
                        banlist.append(part)
            if val == 5:
                #reason
                banlist.append(part)

        if nick != "Nick":
            localbandict[nick] = banlist
    print localbandict
    __builtin__.bandict = localbandict
    print "Ban update complete."

    with open("ban.cache", "wb") as f:
        pickle.dump(localbandict, f)
    ts = time.time()
    __builtin__.lastbanrefresh = datetime.datetime.fromtimestamp(ts).strftime(
        '%Y-%m-%d %H:%M:%S')
# Exemple #30
def refresh_cache():
	api = wikidotapi.connection()
	pages =  api.refresh_pages()
	for page in pages:
		 pagecache.append(api.server.pages.get_meta({"site": api.Site, "pages": [page]}))
	print "\n" + time.ctime()
def updatebans(inp, conn=None, chan=None):
    """Rebuild __builtin__.bancache from the 05command ban page.

    Only runs when invoked from the #site67 channel.  Parses the
    "||"-separated ban table on alexandra-s-ban-page, converting expired
    dated bans to "Unbanned", and reports success or failure back to the
    channel via conn.msg.
    """
    if chan == "#site67":
        try:
            __builtin__.alertops = 0
            api = wikidotapi.connection()
            #overwrite update
            localbancache = {}
            __builtin__.bancache = {}
            api.Site = "05command"
            pages = api.refresh_pages(
            )  #refresh page list provided by the API, is only a list of strings
            source = api.server.pages.get_one({
                "site": api.Site,
                "page": "alexandra-s-ban-page"
            })
            # the ban table sits between "-----" horizontal rules
            content = source["content"]
            fs = content.split("-----")
            banlist = fs[1]
            invbans = banlist.split("\n")
            for ban in invbans:
                # each row: ||Nick(s)||IP||Ban Status||Reason|| -> columns by position
                parts = ban.split("||")
                val = 0
                _list = []
                nick = ""
                author = ""
                for part in parts:
                    val += 1
                    if val == 2:
                        #Nick
                        nick = part
                        _list.append(nick)
                    if val == 3:
                        #IP
                        _list.append(part)
                    if val == 4:
                        #unban date
                        if part != "Ban Status":
                            if part != "Perma":
                                # dated bans expire once today reaches the listed date
                                date = datetime.datetime.strptime(
                                    part, "%m/%d/%Y")
                                today = datetime.datetime.today()
                                if date.date() <= today.date():
                                    part = "Unbanned"
                                _list.append(part)
                            else:
                                _list.append(part)
                    if val == 5:
                        #Reason
                        _list.append(part)
                # skip the header row, keyed by its literal "Nick(s)" cell
                if nick != "Nick(s)":
                    localbancache[nick] = _list
            __builtin__.bancache = localbancache
            print "Ban update complete."
            __builtin__.hugs = 0
            ts = time.time()
            __builtin__.lastbanrefresh = datetime.datetime.fromtimestamp(
                ts).strftime('%Y-%m-%d %H:%M:%S')
            conn.msg(chan, "Ban List Updated")
        except Exception as e:
            # broad catch is deliberate: report any parse/API failure to IRC
            conn.msg(
                chan,
                "Ban List Update Failed, please check ban list for errors. Error is: "
                + e.message)
def test(inp,input = None,chan =None,nick = None   ):
	#check bans
	try:
		with open("bans.bans","r+b") as f:
			data = f.readlines()
			val = 0
			for line in data:
				parts = line.split()
				host = parts[0]
				mtime = parts[1]
				if (datetime.datetime.now()-mtime)>datetime.timedelta(minutes=1): 
					data.pop(val)
					baninput.unban(host)
					print "Unbanning "+host 
				val +=1
	except EOFError:
		pass
	api = wikidotapi.connection() #creates API connection
	#ban update 
	localbandict={}
	try:
		with open("ban.cache","rb") as f:
			localbanddict = pickle.load(f)
	except EOFError:
		pass
	__builtin__.bandict = localbandict
	localbandict = {}
	api.Site = "05command"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	source = api.server.pages.get_one({"site":api.Site,"page":"alexandra-s-ban-page"})
	content = source["content"]
	fs = content.split("-----")
	banlist = fs[1]
	invbans = banlist.split("\n")
	for ban in invbans:
		parts = ban.split("||")
		val = 0
		banlist = []
		nick = ""
		for part in parts:
			val+=1
			if val ==2:
				#nicks
				nick = part
				banlist.append(part)
			if val ==3:
				#IPs
				banlist.append(part)
			if val ==4:
				#status
				if nick != "Nick":
					if part != "Perma":
						mtime = datetime.datetime.strptime(part,"%m/%d/%Y")
						if datetime.datetime.today() >= mtime:
							print datetime.datetime.today()
							print mtime
							banlist.append("Unbanned")
						else:
							banlist.append(part)
					else:
						banlist.append(part)
			if val ==5:
				#reason
				banlist.append(part)
				
		if nick != "Nick":
			localbandict[nick] = banlist
	print localbandict
	__builtin__.bandict = localbandict
	print "Ban update complete."
	
	with open("ban.cache","wb") as f:
		pickle.dump(localbandict,f)
	ts = time.time()
	__builtin__.lastbanrefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def cache_refresh(): #calls itself automatically once called for the first time
	api = wikidotapi.connection()

	#file reading
	localauthorlist = {}
	localtitlelist = {}
	localtaglist = {}
	scpcache = {}
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	try:
		with open("cache.cache","rb") as f:
			scpcache = pickle.load(f)
	except EOFError:
		pass
	if len(scpcache) != 0:
		print "Reading cache"
		__builtin__.scppagecache = scpcache
		for page in pages:
			for item in scpcache:
				try:
					localauthorlist[page] = item[page]["created_by"]
					localtitlelist[page] = item[page]["title"]
					localtaglist[page] = item[page]["tags"]
				except KeyError:
				 pass
	__builtin__.authorlist = localauthorlist
	__builtin__.titlelist = localtitlelist
	__builtin__.taglist = localtaglist
	
	#WL
	__builtin__.callsmade = 0
	api.Site = "wanderers-library"
	pages = api.refresh_pages()
	__builtin__.totalpagescurcache = len(pages)
	print "Refreshing WL cache"
	newpagecache = [] #the newpagecache is so that while it is updating you can still use the old one
	for page in pages:
		newpagecache.append(api.server.pages.get_meta({"site": api.Site, "pages": [page]}))
		time.sleep(0.4) #this keeps the api calls within an acceptable threshold
		__builtin__.callsmade+=1
	print "Cache refreshed!"
	__builtin__.pagecache= newpagecache #__builtin__ means that pagecache is global and can be used by plugins
	ts = time.time()
	__builtin__.lastcacherefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
	
	#SCP
	#SCP
	__builtin__.callsmade = 0
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	__builtin__.totalpagescurcache = len(pages)
	print "Refreshing SCP cache"
	__builtin__.totalpagescurcache = len(pages)
	newpagecache = [] #the newpagecache is so that while it is updating you can still use the old one
	localauthorlist = {}
	localtitlelist = {}
	localtaglist = {}
	for page in pages:
		x = api.server.pages.get_meta({"site": api.Site, "pages": [page]})
		cache = {}
		cache[page] = x[page]
		localauthorlist[page] = cache[page]["created_by"]
		localtitlelist[page] = cache[page]["title"]
		localtaglist[page] = cache[page]["tags"]
		newpagecache.append(x)
		time.sleep(0.3) #this keeps the api calls within an acceptable threshold
		__builtin__.callsmade +=1 
	__builtin__.authorlist = localauthorlist
	__builtin__.titlelist = localtitlelist
	__builtin__.taglist = localtaglist
	print "Cache refreshed!"
	__builtin__.scppagecache= newpagecache #__builtin__ means that pagecache is global and can be used by plugins
	
	with open("cache.cache","wb") as f:
		pickle.dump(newpagecache,f)

	#end	
	ts = time.time()
	lastcacherefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
	time.sleep(3600) #one hour 
	cache_refresh() #calls itself again
def authordetails(inp,nick=None):
	"""Create/update an author detail page on alexandra-scp for author *inp*.

	Scans the cached SCP-wiki data (globals: scppages, taglist, authorlist,
	titlelist, ratinglist) for articles credited to the author, tallies
	per-category counts and ratings, then writes a summary page through the
	wikidot API.  Returns the created-page URL, a "Did you mean ..."
	disambiguation string when several authors match, or "Author not found.".

	Fixes vs. previous revision: integer comparison no longer uses the
	identity operator (`is not 0`), and the average-SCP-rating division is
	guarded against authors with zero SCP articles.
	"""
	authpages = []
	totalrating = 0
	taletotal = 0
	scptotal = 0
	goitotal = 0
	pagerating = 0
	author = inp
	multimatch = []
	authorpage = ""
	found = 0
	exact = 0
	rewrite =0
	orgauth = ""
	newauth = ""
	rewriteauthor =0
	pagetitle = ""
	scprating = 0
	try:
		for page in scppages:
			if "scp" in taglist[page] or "tale" in taglist[page] or "goi-format" in taglist[page]: #makes sure only articles are counted
				# author strings of the form "orig:rewrite:new" credit a rewriter
				if ":rewrite:" in authorlist[page]: 
					bothauths = authorlist[page].split(":rewrite:")
					orgauth = bothauths[0]
					newauth = bothauths[1]
					if author == newauth: 
						rewriteauthor = 1
				if author == authorlist[page] or rewriteauthor ==1:
					found =1 
					rewriteauthor = 0
					
					authpages.append(page)
					pagetitle = titlelist[page]
					pagerating = ratinglist[page]
					totalrating = totalrating + pagerating
					if "scp" in taglist[page]:
						scptotal +=1
						scprating+=pagerating
					if  "tale" in taglist[page]:
						taletotal+=1
					if  "goi-format" in taglist[page]:
						goitotal+=1
				try:
					if inp.lower() in authorlist[page].lower(): #this just matches the author with the first author match
						if inp.lower() == authorlist[page].lower():
							exact +=1
						if exact == 1:
							# first exact match wins: restart the tallies on it
							author = authorlist[page]
							authpages = []
							multimatch = [authorlist[page]]
						elif exact < 1:
							# substring-only match: remember for disambiguation
							multimatch.append(authorlist[page])
						if inp.lower() in authorlist[page].lower() and found == 0:
							author = authorlist[page]
							if ":rewrite:" in authorlist[page]:
								bothauths = authorlist[page].split(":rewrite:")
								orgauth = bothauths[0]
								newauth = bothauths[1]
								if inp.lower() in orgauth.lower():
									author = orgauth
								if inp.lower() in newauth.lower():
									author = newauth 
								rewrite = 1
							found = 1
							authpages.append(page)
							pagetitle = titlelist[page]
							pagerating = ratinglist[page] 
							totalrating = totalrating + pagerating
							if "scp" in taglist[page]:
								scptotal +=1
								scprating+=pagerating
							if  "tale" in taglist[page]:
								taletotal+=1
							if  "goi-format" in taglist[page]:
								goitotal+=1
				except AttributeError:
					pass
			else:
				# non-article pages: pick up the author's own author-page link
				if "author" in taglist[page]:
					if ":rewrite:" in authorlist[page]:
						bothauths = authorlist[page].split(":rewrite:")
						orgauth = bothauths[0]
						newauth = bothauths[1]
						if newauth == author:
							authorpage = "http://scp-wiki.net/"+page+" - "
					if author == authorlist[page]:
						authorpage = "http://scp-wiki.net/"+page+" - "
	except KeyError:
		pass
	# dedupe the match list to decide whether disambiguation is needed
	plusauth = []
	moreauthors = 1
	plusauth.append(author)
	for authors in multimatch: #checks to see if multiple authors found 
		z =0 
		if ":rewrite:" in authors:
			continue 
		for foundauthor in plusauth:
			if foundauthor ==authors:
				z =1
		if authors != author:
			if z == 0:
				moreauthors +=1
				plusauth.append(authors)
	if moreauthors>1:
		x = 0
		final = "Did you mean "
		for auth in plusauth:
			x+=1
			if x ==1:
				final+=auth+""
			if x ==2 and moreauthors ==2:
				final+=" or "+auth+"?"
			if x==2 and moreauthors >2:
				final+=", "+auth+""
			if x==3 and moreauthors ==3:
				final += ", or "+auth+"?"
			if x==3 and moreauthors >3:
				final += ", or "+auth+"? With " + str(moreauthors) + " more authors matching your query."
		return final
	avgrating = 0
	if taletotal+scptotal+goitotal != 0: #just so no division by zero
		avgrating = totalrating/(taletotal+scptotal+goitotal)
	if not authpages: #if no author pages are added 
		return "Author not found."
	api = wikidotapi.connection()
	api.Site = "alexandra-scp"
	table = ""
	for page in authpages:
		table+="||**"+titlelist[page]+"**||Rating: "+str(ratinglist[page])+"||Tags: "+str(taglist[page])+"|| http://scp-wiki.net/"+page+"||\n"
	# guard: authors with no SCP articles would otherwise divide by zero
	avgscp = scprating/scptotal if scptotal else 0
	api.set_page_item(author.replace(" ","-").lower(), "content", "**Pages Authored:"+str(len(authpages))+"**\n \n**Number of SCPs Written:** "+str(scptotal)+"\n**Number of Tales Written:** "+str(taletotal)+"\n**Number of GOI Formats Written:** "+str(goitotal)+"\n \n**Net Upvotes Received:** "+str(totalrating)+"\n**Average Rating per Page:** "+str(avgrating)+"\n**Average SCP Rating:** "+str(avgscp)+"\n++ Articles\n||Title||Rating||Tags||Link||\n"+table, create=True)
	return "Author detail page created http://alexandra-scp.wikidot.com/"+author.replace(" ","-").lower()
def cache_refresh(): #calls itself automatically once called for the first time
	threading.Timer(3600, cache_refresh).start (); 
	api = wikidotapi.connection()
	#overwrite update
	overwritecache = {}
	api.Site = "05command"
	pages = api.refresh_pages() #refresh page list provided by the API, is only a list of strings
	source = api.server.pages.get_one({"site":api.Site,"page":"alexandra-rewrite"})
	content = source["content"]
	fs = content.split("-----")
	rewritelist = fs[1]
	invrewrite = rewritelist.split("\n")
	for rewrite in invrewrite:
		parts = rewrite.split("||")
		val = 0
		writelist = []
		first = ""
		author = ""
		for part in parts:
			val+=1
			if val ==2:
				#page
				first = part 
			if val ==3:
				#author
				author = part 
		if first != "Page":
			overwritecache[first.lower()] = author 
	print "Rewrite update complete."
	#file reading
	localauthorlist = {}
	localtitlelist = {}
	localtaglist = {}
	localratinglist = {}
	scpcache = {}
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	__builtin__.scppages = api.refresh_pages()
	try:
		with open("cache.cache","rb") as f:
			scpcache = pickle.load(f)
	except EOFError:
		pass
	if len(scpcache) != 0:
		print "Reading cache"
		__builtin__.scppagecache = scpcache
		for page in pages:
			for item in scpcache:
				try:
					localauthorlist[page] = item[page]["created_by"]
					if localauthorlist[page] == None:
						localauthorlist[page] = ""
					localtitlelist[page] = item[page]["title"]
					localtaglist[page] = item[page]["tags"]
					if localtaglist[page] == None:
						localtaglist[page] = ""
					localratinglist[page] = item[page]["rating"]
					if overwritecache[page.lower()]:
						localauthorlist[page] = localauthorlist[page]+":rewrite:"+overwritecache[page.lower()]
						if ":override:" in localauthorlist[page]:
							bothauths = localauthorlist[page].split(":rewrite:")
							newauth = bothauths[1]
							localauthorlist[page] = newauth[10:]
				except KeyError:
				 pass
	__builtin__.authorlist = localauthorlist
	__builtin__.titlelist = localtitlelist
	__builtin__.taglist = localtaglist
	__builtin__.ratinglist = localratinglist
	#scp titles 
	print "Updating SCP titles"
	localscptitles = {}
	api.site = "scp-wiki"
	pages = api.refresh_pages()
	page_one_blank = api.server.pages.get_one({"site":api.Site,"page":"scp-series"})
	page_one = page_one_blank["content"]
	page_two_blank = api.server.pages.get_one({"site":api.Site,"page":"scp-series-2"})
	page_two = page_two_blank["content"]
	page_three_blank = api.server.pages.get_one({"site":api.Site,"page":"scp-series-3"})
	page_three = page_three_blank["content"]
	page_j_blank = api.server.pages.get_one({"site":api.Site,"page":"joke-scps"})
	page_j = page_j_blank["content"]
	page_arc_b = api.server.pages.get_one({"site":api.Site,"page":"archived-scps"})
	page_arc = page_arc_b["content"]
	page_decon_b = api.server.pages.get_one({"site":api.Site,"page":"decommissioned-scps"})
	page_decon = page_decon_b["content"]
	page_ex_b = api.server.pages.get_one({"site":api.Site,"page":"scp-ex"})
	page_ex = page_ex_b["content"]
	
	page_one_split = page_one.split("\n")
	for part in page_one_split:
		if part.startswith("* [[[SCP-"):
			num = part[9:12]
			title = part[18:].encode("ascii","ignore")
			page = "scp-"+num  
			if "]" not in num:
				localscptitles[page.lower()] = title
	page_two_split = page_two.split("\n")
	for part in page_two_split:
		if part.startswith("* [[[SCP-"):
			num = part[9:13]
			title = part[19:].encode("ascii","ignore")
			page = "scp-"+num  
			if "]" not in num:
				localscptitles[page.lower()] = title
	page_three_split = page_three.split("\n")
	for part in page_three_split:
		if part.startswith("* [[[SCP-"):
			num = part[9:13]
			title = part[19:].encode("ascii","ignore")
			page = "scp-"+num  
			if "]" not in num:
				localscptitles[page.lower()] = title
	page_j_split = page_j.split("\n")
	for part in page_j_split:
		if part.startswith("* [[[SCP-"):
			segments = part.split(" - ")
			first = segments[0]
			end=first.index("]")
			num = part[9:end]
			if "|" in first:
				secend = num.index("|")
				num = num[:secend]
			title = segments[1].encode("ascii","ignore")
			page = "scp-"+num
			localscptitles[page.lower()] = title
	page_arc_split = page_arc.split("\n")
	for part in page_arc_split:
		if part.startswith("* [[[SCP-"):
			segments = part.split(" - ")
			first = segments[0]
			end=first.index("]")
			num = part[9:end]
			title = segments[1].encode("ascii","ignore")
			page = "scp-"+num
			localscptitles[page.lower()] = title
	page_decon_split = page_decon.split("\n")
	for part in page_decon_split:
		if part.startswith("* [[[SCP-"):
			segments = part.split(" - ")
			first = segments[0]
			end=first.index("]")
			num = part[9:end]
			title = segments[1].encode("ascii","ignore")
			page = "decomm:scp-"+num
			localscptitles[page.lower()] = title
	page_ex_split = page_ex.split("\n")
	for part in page_ex_split:
		if part.startswith("* [[[SCP-"):
			segments = part.split(" - ")
			first = segments[0]
			end=first.index("]")
			num = part[9:end]
			title = segments[1].encode("ascii","ignore")
			page = "scp-"+num
			localscptitles[page.lower()] = title
	__builtin__.scptitles = localscptitles
	#WL
	__builtin__.callsmade = 0
	api.Site = "wanderers-library"
	__builtin__.wlpages = api.refresh_pages()
	pages = api.refresh_pages()
	__builtin__.totalpagescurcache = len(pages)
	print "Refreshing WL cache"
	newpagecache = [] #the newpagecache is so that while it is updating you can still use the old one
	for page in pages:
		newpagecache.append(api.server.pages.get_meta({"site": api.Site, "pages": [page]}))
		time.sleep(0.4) #this keeps the api calls within an acceptable threshold
		__builtin__.callsmade+=1
	print "Cache refreshed!"
	__builtin__.pagecache= newpagecache #__builtin__ means that pagecache is global and can be used by plugins
	ts = time.time()
	__builtin__.lastcacherefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

	#SCP
	#SCP
	__builtin__.callsmade = 0
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	__builtin__.totalpagescurcache = len(pages)
	print "Refreshing SCP cache"
	__builtin__.totalpagescurcache = len(pages)
	newpagecache = [] #the newpagecache is so that while it is updating you can still use the old one
	localauthorlist = {}
	localtitlelist = {}
	localtaglist = {}
	localratinglist = {}
	for page in pages:
		x = api.server.pages.get_meta({"site": api.Site, "pages": [page]})
		cache = {}
		cache[page] = x[page]
		try:
			localauthorlist[page] = cache[page]["created_by"]
			if localauthorlist[page] == None:
				localauthorlist[page] = ""
			localtitlelist[page] = cache[page]["title"]
			localtaglist[page] = cache[page]["tags"]
			if localtaglist[page] == None:
				localtaglist[page] = ""
			localratinglist[page] = cache[page]["rating"]
			if overwritecache[page.lower()]:
				localauthorlist[page] = localauthorlist[page]+":rewrite:"+overwritecache[page.lower()]
				if ":override:" in localauthorlist[page]:
					bothauths = localauthorlist[page].split(":rewrite:")
					newauth = bothauths[1]
					localauthorlist[page] = newauth[10:]
		except KeyError:
			pass 
		newpagecache.append(x)
		time.sleep(0.3) #this keeps the api calls within an acceptable threshold
		__builtin__.callsmade +=1 
	__builtin__.authorlist = localauthorlist
	__builtin__.titlelist = localtitlelist
	__builtin__.taglist = localtaglist
	__builtin__.ratinglist = localratinglist
	print "Cache refreshed!"
	__builtin__.scppagecache= newpagecache #__builtin__ means that pagecache is global and can be used by plugins

	with open("cache.cache","wb") as f:
		pickle.dump(newpagecache,f)

	#end	
	ts = time.time()
	lastcacherefresh = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def author(inp):
	".author <Author Name> -- Will return details regarding the author"
	# Scans the cached SCP-wiki page data (globals: taglist, authorlist,
	# titlelist) for articles by the queried author and returns a one-line
	# summary, or a "Did you mean ..." disambiguation when several authors
	# match.  Fix vs. previous revision: integer comparison no longer uses
	# the identity operator (`is not 0`); pagetitle/pagerating initialized.
	api = wikidotapi.connection()
	api.Site = "scp-wiki"
	pages = api.refresh_pages()
	authpages = []
	totalrating = 0
	taletotal = 0
	scptotal = 0
	pagerating = 0
	pagetitle = ""
	author = "None"
	multimatch = []
	authorpage = ""
	try:
		for page in pages:
			if "scp" in taglist[page] or "tale" in taglist[page]: #makes sure only articles are counted
				if author == authorlist[page]:
					authpages.append(page)
					pagetitle = titlelist[page]
					pagerating = api.get_page_item(page,"rating")
					totalrating = totalrating + pagerating
					if "scp" in taglist[page]:
						scptotal +=1
					if  "tale" in taglist[page]:
						taletotal+=1
				try:
					if inp.lower() in authorlist[page].lower(): #this just matches the author with the first author match
						multimatch.append(authorlist[page])
						if author == "None":
							author = authorlist[page]
							authpages.append(page)
							pagetitle = titlelist[page]
							pagerating = api.get_page_item(page,"rating")
							totalrating = totalrating + pagerating
							if "scp" in taglist[page]:
								scptotal +=1
							if  "tale" in taglist[page]:
								taletotal+=1
				except AttributeError:
					pass
			else:
				# non-article pages: pick up the author's own author-page link
				if "author" in taglist[page]:
					if author == authorlist[page]:
						authorpage = "http://scp-wiki.net/"+page+" - "
						if author == "DrEverettMann": #hardcode because yes
							authorpage = "http://www.scp-wiki.net/dr-manns-personnel-file - "

	except KeyError:
		pass
	# dedupe the match list to decide whether disambiguation is needed
	plusauth = []
	moreauthors = 1
	plusauth.append(author)
	for authors in multimatch: #checks to see if multiple authors found 
		z =0 
		for foundauthor in plusauth:
			if foundauthor ==authors:
				z =1
		if authors != author:
			if z == 0:
				moreauthors +=1
				plusauth.append(authors)

	if moreauthors>1:
		x = 0
		final = "Did you mean "
		for auth in plusauth:
			x+=1
			if x ==1:
				final+=auth+""
			if x ==2 and moreauthors ==2:
				final+=" or "+auth+"?"
			if x==2 and moreauthors >2:
				final+=", "+auth+""
			if x==3 and moreauthors ==3:
				final += ", or  "+auth+"?"
			if x==3 and moreauthors >3:
				final += ", or  "+auth+"? With " + str(moreauthors) + " more authors matching your query."
		return final
	avgrating = 0
	if taletotal+scptotal != 0: #just so no division by zero
		avgrating = totalrating/(taletotal+scptotal)
	if not authpages: #if no author pages are added 
		return "Author not found."
	
	return authorpage+""+author +" has written " + str(scptotal) + " SCPs and "+str(taletotal)+" tales. They have " + str(totalrating)+ " net upvotes with an average rating of " + str(avgrating) + ". Their most recent article is " + pagetitle + "(Rating:" + str(pagerating) + ")"#+"- http://scp-wiki.net/" + authpages[-1].lower()
def scpregex(match):
	"""Hook: expand an SCP page reference in chat into a one-line summary.

	match is a regex match object; match.string is the whole message text.
	A single-word message triggers on an "scp-" or "!scp-" prefix; in a
	multi-word message only words starting with "!scp-" trigger.  Page data
	is read from the __builtin__ caches (ratinglist / authorlist /
	titlelist / scptitles); on any cache miss the wikidot API is queried
	directly instead.  Returns the summary string, or None when nothing
	in the message matches.
	"""
	def summarize_cached(page):
		# Build the reply purely from the cache dicts.
		# Raises KeyError on any cache miss so the caller can fall back.
		rating = ratinglist[page]
		sign = "+" if rating >= 0 else ""  # "-" already comes from str(rating)
		rate_part = "Rating:" + sign + str(rating)
		author = authorlist[page]
		if author == "":
			author = "unknown"
		credit = "Written by " + author
		if ":rewrite:" in author:
			# authorlist stores "original:rewrite:rewriter" for rewritten pages
			bothauths = authorlist[page].split(":rewrite:")
			credit = "Originally written by " + bothauths[0] + ", rewritten by " + bothauths[1]
		return (titlelist[page] + " (" + scptitles[page] + ", " + credit
			+ ", " + rate_part + ") - http://scp-wiki.net/" + page.lower())

	def summarize_live(page):
		# Cache miss: query the wikidot API directly (no series title here).
		api = wikidotapi.connection()
		api.Site = "scp-wiki"
		rating = api.get_page_item(page, "rating")
		sign = "+" if rating >= 0 else ""
		rate_part = "Rating:" + sign + str(rating)
		author = api.get_page_item(page, "created_by")
		if author == "":
			author = "unknown"
		title = api.get_page_item(page, "title")
		return (title + " (" + rate_part + ", Written by " + author
			+ ") - http://scp-wiki.net/" + page.lower())

	punctuation = "[!,_,?,,,',.]"  # strips !, _, ?, commas, ' and . from the page name
	if ' ' not in match.string:
		lowered = match.string.lower()
		if lowered.startswith("scp-") or lowered.startswith("!scp-"):
			page = re.sub(punctuation, '', lowered)
			if "--" in page:  # collapse a doubled dash (e.g. "scp--173" -> "scp-173")
				dash = page.index("-")
				page = page[:dash] + page[dash + 1:]
			try:
				return summarize_cached(page)
			except KeyError:
				return summarize_live(page)
	else:
		for word in match.string.lower().split():
			if word.startswith("!scp-"):
				page = re.sub(punctuation, '', word)
				try:
					return summarize_cached(page)
				except KeyError:
					return summarize_live(page)
def cache_refresh(
):  #calls itself automatically once called for the first time
    twitter_api = twitter.Api(consumer_key='',
                              consumer_secret='',
                              access_token_key='2288772386-',
                              access_token_secret='')
    threading.Timer(3600, cache_refresh).start()
    api = wikidotapi.connection()
    #overwrite update
    overwritecache = {}
    api.Site = "05command"
    pages = api.refresh_pages(
    )  #refresh page list provided by the API, is only a list of strings
    source = api.server.pages.get_one({
        "site": api.Site,
        "page": "alexandra-rewrite"
    })
    content = source["content"]
    fs = content.split("-----")
    rewritelist = fs[1]
    invrewrite = rewritelist.split("\n")
    for rewrite in invrewrite:
        parts = rewrite.split("||")
        val = 0
        writelist = []
        first = ""
        author = ""
        for part in parts:
            val += 1
            if val == 2:
                #page
                first = part
            if val == 3:
                #author
                author = part
        if first != "Page":
            overwritecache[first.lower()] = author
    print "Rewrite update complete."
    source = api.server.pages.get_one({
        "site": api.Site,
        "page": "alexandra-glossary"
    })
    content = source["content"]
    fs = content.split("-----")
    glossarylist = fs[1]
    invglossary = glossarylist.split("\n")
    localglossary = {}
    for terms in invglossary:
        parts = terms.split("||")
        val = 0
        writelist = []
        first = ""
        definition = ""
        for part in parts:
            val += 1
            if val == 2:
                #word
                first = part
            if val == 3:
                #def
                definition = part
        if first != "Word":
            localglossary[first] = definition
    __builtin__.termlist = localglossary
    print "Glossary update complete."
    #file reading
    localauthorlist = {}
    localtitlelist = {}
    localtaglist = {}
    localratinglist = {}
    scpcache = {}
    api.Site = "scp-wiki"
    pages = api.refresh_pages()
    __builtin__.scppages = api.refresh_pages()
    try:
        with open("cache.cache", "rb") as f:
            scpcache = pickle.load(f)
    except EOFError:
        pass
    if len(scpcache) != 0:
        print "Reading cache"
        __builtin__.scppagecache = scpcache
        for page in pages:
            for item in scpcache:
                try:
                    localauthorlist[page] = item[page]["created_by"]
                    if localauthorlist[page] == None:
                        localauthorlist[page] = ""
                    localtitlelist[page] = item[page]["title"]
                    localtaglist[page] = item[page]["tags"]
                    if localtaglist[page] == None:
                        localtaglist[page] = ""
                    localratinglist[page] = item[page]["rating"]
                    if overwritecache[page.lower()]:
                        localauthorlist[page] = localauthorlist[
                            page] + ":rewrite:" + overwritecache[page.lower()]
                        if ":override:" in localauthorlist[page]:
                            bothauths = localauthorlist[page].split(
                                ":rewrite:")
                            newauth = bothauths[1]
                            localauthorlist[page] = newauth[10:]
                except KeyError:
                    pass
    __builtin__.authorlist = localauthorlist
    __builtin__.titlelist = localtitlelist
    __builtin__.taglist = localtaglist
    __builtin__.ratinglist = localratinglist
    #scp titles
    print "Updating SCP titles"
    localscptitles = {}
    api.site = "scp-wiki"
    pages = api.refresh_pages()
    page_one_blank = api.server.pages.get_one({
        "site": api.Site,
        "page": "scp-series"
    })
    page_one = page_one_blank["content"]
    page_two_blank = api.server.pages.get_one({
        "site": api.Site,
        "page": "scp-series-2"
    })
    page_two = page_two_blank["content"]
    page_three_blank = api.server.pages.get_one({
        "site": api.Site,
        "page": "scp-series-3"
    })
    page_three = page_three_blank["content"]
    page_j_blank = api.server.pages.get_one({
        "site": api.Site,
        "page": "joke-scps"
    })
    page_j = page_j_blank["content"]
    page_arc_b = api.server.pages.get_one({
        "site": api.Site,
        "page": "archived-scps"
    })
    page_arc = page_arc_b["content"]
    page_decon_b = api.server.pages.get_one({
        "site": api.Site,
        "page": "decommissioned-scps"
    })
    page_decon = page_decon_b["content"]
    page_ex_b = api.server.pages.get_one({"site": api.Site, "page": "scp-ex"})
    page_ex = page_ex_b["content"]

    page_one_split = page_one.split("\n")
    for part in page_one_split:
        if part.startswith("* [[[SCP-"):
            num = part[9:12]
            title = part[18:].encode("ascii", "ignore")
            page = "scp-" + num
            if "]" not in num:
                localscptitles[page.lower()] = title
    page_two_split = page_two.split("\n")
    for part in page_two_split:
        if part.startswith("* [[[SCP-"):
            num = part[9:13]
            title = part[19:].encode("ascii", "ignore")
            page = "scp-" + num
            if "]" not in num:
                localscptitles[page.lower()] = title
    page_three_split = page_three.split("\n")
    for part in page_three_split:
        if part.startswith("* [[[SCP-"):
            num = part[9:13]
            title = part[19:].encode("ascii", "ignore")
            page = "scp-" + num
            if "]" not in num:
                localscptitles[page.lower()] = title
    page_j_split = page_j.split("\n")
    for part in page_j_split:
        if part.startswith("* [[[SCP-"):
            segments = part.split(" - ")
            first = segments[0]
            end = first.index("]")
            num = part[9:end]
            if "|" in first:
                secend = num.index("|")
                num = num[:secend]
            title = segments[1].encode("ascii", "ignore")
            page = "scp-" + num
            localscptitles[page.lower()] = title
    page_arc_split = page_arc.split("\n")
    for part in page_arc_split:
        if part.startswith("* [[[SCP-"):
            segments = part.split(" - ")
            first = segments[0]
            end = first.index("]")
            num = part[9:end]
            title = segments[1].encode("ascii", "ignore")
            page = "scp-" + num
            localscptitles[page.lower()] = title
    page_decon_split = page_decon.split("\n")
    for part in page_decon_split:
        if part.startswith("* [[[SCP-"):
            segments = part.split(" - ")
            first = segments[0]
            end = first.index("]")
            num = part[9:end]
            title = segments[1].encode("ascii", "ignore")
            page = "decomm:scp-" + num
            localscptitles[page.lower()] = title
    page_ex_split = page_ex.split("\n")
    for part in page_ex_split:
        if part.startswith("* [[[SCP-"):
            segments = part.split(" - ")
            first = segments[0]
            end = first.index("]")
            num = part[9:end]
            title = segments[1].encode("ascii", "ignore")
            page = "scp-" + num
            localscptitles[page.lower()] = title
    __builtin__.scptitles = localscptitles
    #WL
    __builtin__.callsmade = 0
    api.Site = "wanderers-library"
    __builtin__.wlpages = api.refresh_pages()
    pages = api.refresh_pages()
    __builtin__.totalpagescurcache = len(pages)
    print "Refreshing WL cache"
    newpagecache = [
    ]  #the newpagecache is so that while it is updating you can still use the old one
    for page in pages:
        newpagecache.append(
            api.server.pages.get_meta({
                "site": api.Site,
                "pages": [page]
            }))
        time.sleep(
            0.4)  #this keeps the api calls within an acceptable threshold
        __builtin__.callsmade += 1
    print "Cache refreshed!"
    __builtin__.pagecache = newpagecache  #__builtin__ means that pagecache is global and can be used by plugins
    ts = time.time()
    __builtin__.lastcacherefresh = datetime.datetime.fromtimestamp(
        ts).strftime('%Y-%m-%d %H:%M:%S')

    #SCP
    #SCP
    __builtin__.callsmade = 0
    api.Site = "scp-wiki"
    pages = api.refresh_pages()
    __builtin__.totalpagescurcache = len(pages)
    print "Refreshing SCP cache"
    __builtin__.totalpagescurcache = len(pages)
    newpagecache = [
    ]  #the newpagecache is so that while it is updating you can still use the old one
    localauthorlist = {}
    localtitlelist = {}
    localtaglist = {}
    localratinglist = {}
    __builtin__.oldratinglist = ratinglist
    for page in pages:
        x = api.server.pages.get_meta({"site": api.Site, "pages": [page]})
        cache = {}
        cache[page] = x[page]
        try:
            localauthorlist[page] = cache[page]["created_by"]
            if localauthorlist[page] == None:
                localauthorlist[page] = ""
            localtitlelist[page] = cache[page]["title"]
            localtaglist[page] = cache[page]["tags"]
            if localtaglist[page] == None:
                localtaglist[page] = ""
            localratinglist[page] = cache[page]["rating"]
            if overwritecache[page.lower()]:
                localauthorlist[page] = localauthorlist[
                    page] + ":rewrite:" + overwritecache[page.lower()]
                if ":override:" in localauthorlist[page]:
                    bothauths = localauthorlist[page].split(":rewrite:")
                    newauth = bothauths[1]
                    localauthorlist[page] = newauth[10:]
        except KeyError:
            pass
        newpagecache.append(x)
        time.sleep(
            0.3)  #this keeps the api calls within an acceptable threshold
        __builtin__.callsmade += 1
    __builtin__.authorlist = localauthorlist
    __builtin__.titlelist = localtitlelist
    __builtin__.taglist = localtaglist
    __builtin__.ratinglist = localratinglist

    print "Cache refreshed!"
    statuses = twitter_api.GetUserTimeline("scpwiki")
    final = ""
    for page in pages:
        done = 0
        for s in statuses:
            if titlelist[page].lower() in s.text.lower():
                done = 1
        if done == 0:
            try:
                if oldratinglist[page] < 20:
                    if ratinglist[page] >= 20:
                        try:
                            if scptitles[page]:
                                final = scptitles[page] + "- " + titlelist[
                                    page] + " by " + authorlist[
                                        page] + ". http://scp-wiki.net/" + page
                                status = twitter_api.PostUpdate(final)
                        except KeyError:
                            final = "[ACCESS GRANTED] " + titlelist[
                                page] + " by " + authorlist[
                                    page] + ". http://scp-wiki.net/" + page
                            status = twitter_api.PostUpdate(final)
            except KeyError:
                if ratinglist[page] >= 20:
                    try:
                        if scptitles[page]:
                            final = scptitles[page] + "- " + titlelist[
                                page] + " by " + authorlist[
                                    page] + ". http://scp-wiki.net/" + page
                            status = twitter_api.PostUpdate(final)
                    except KeyError:
                        final = "[ACCESS GRANTED] " + titlelist[
                            page] + " by " + authorlist[
                                page] + ". http://scp-wiki.net/" + page
                        status = twitter_api.PostUpdate(final)
    print "Tweets sent"
    __builtin__.scppagecache = newpagecache  #__builtin__ means that pagecache is global and can be used by plugins

    with open("cache.cache", "wb") as f:
        pickle.dump(newpagecache, f)

    #end
    ts = time.time()
    lastcacherefresh = datetime.datetime.fromtimestamp(ts).strftime(
        '%Y-%m-%d %H:%M:%S')
# Exemple #39
# 0
def author(inp):
    ".author <Author Name> -- Will return details regarding the author"
    if firstrefresh == 0:  #make sure the cache actually exists
        return "Cache has not yet updated, please wait a minute and search again."
    api = wikidotapi.connection()
    api.Site = "wanderers-library"
    pages = api.refresh_pages()
    authpages = []
    totalrating = 0
    pagetotal = 0
    pagerating = 0
    author = "None"
    multimatch = []
    authorpage = ""
    for page in pages:
        for item in pagecache:  #these two for loops iterate through every item within each page dictionary, the proper syntax for accessing a specific item is item[page][itemname],
            try:
                if "entry" in item[page][
                        "tags"]:  #makes sure only articles are counted
                    if author == item[page]["created_by"]:
                        authpages.append(page)
                        pagetitle = item[page]["title"]
                        pagerating = item[page]["rating"]
                        totalrating = totalrating + pagerating
                        print page
                        pagetotal = pagetotal + 1
                    if inp.lower() in item[page]["created_by"].lower(
                    ) and author == "None":  #this just matches the author with the first author match
                        author = item[page]["created_by"]
                        authpages.append(page)
                        pagetitle = item[page]["title"]
                        pagerating = item[page]["rating"]
                        totalrating = totalrating + pagerating
                        print page
                        pagetotal = pagetotal + 1  #all lines above provide page data, math is pretty easy and self-explanatory
                else:
                    if "author" in item[page]["tags"]:
                        if author == item[page]["created_by"]:
                            authorpage = "http://wanderers-library.wikidot.com/" + item[
                                page]["fullname"] + " - "
            except KeyError:  #must do error handling for code to be valid, iterates through incorrect keys multiple times, do not print things in the except clause, slows down program immensely
                pass
    for page in pages:  #this loop checks to see if multiple authors match input
        for item in pagecache:
            try:
                if "entry" in item[page]["tags"]:
                    if inp.lower() in item[page]["created_by"].lower():
                        multimatch.append(item[page]["created_by"])
            except KeyError:
                pass
    for authors in multimatch:  #checks to see if multiple authors found
        if authors != author:
            return "There are " + str(
                len(multimatch)
            ) + " authors matching you query. Please be more specifc. "
    avgrating = 0
    if pagetotal is not 0:  #just so no division by zero
        avgrating = totalrating / pagetotal
    if not authpages:  #if no author pages are added
        return "Author not found."
    return "nonick::" + authorpage + "" + author + " has written " + str(
        pagetotal
    ) + " pages. They have " + str(
        totalrating
    ) + " net upvotes with an average rating of " + str(
        avgrating
    ) + ". Their most recent article is " + pagetitle + "(Rating:" + str(
        pagerating
    ) + ")"  #+"- http://wanderers-library.wikidot.com/" + authpages[-1].lower()
			try:
				rating = ratinglist[page]
				ratesign = ""
				if rating >= 0:
					ratesign = "+" #adds + or minus sign in front of rating
				ratestring = "Rating:"+ratesign+str(rating)+"" 
				author = authorlist[page]
				authorstring = makeauthorstring(author)
				title = titlelist[page]
				scptitle = scptitles[page]
				sepstring = ", "
				link = "http://scp-wiki.net/"+page.lower() 
				return ""+title+" ("+scptitle+sepstring+authorstring+sepstring+ratestring+") - "+link 
# NOTE(review): unresolved merge-conflict marker ("<<<<<<< HEAD") found here; the conflict still needs resolving
			except KeyError as e:
				api = wikidotapi.connection() 
				api.Site = "scp-wiki"
				if api.page_exists(page):
					rating = api.get_page_item(page,"rating")
					ratesign = ""
					if rating >= 0:
						ratesign = "+" #adds + or minus sign in front of rating
					ratestring = "Rating:"+ratesign+str(rating)+"" 
					author = api.get_page_item(page,"created_by")
					if author == "":
						author = "unknown"
					authorstring = "Written by "+author
					title = api.get_page_item(page,"title")
					sepstring = ", "
					link = "http://scp-wiki.net/"+page.lower() 
					return ""+title+" ("+ratestring+sepstring+authorstring+") - "+link 
# Exemple #41
# 0
from whiffle import wikidotapi

api = wikidotapi.connection()
pages = api.Pages
total = 0
for page in pages:
	total += 1
print total

@hook.command
def author(inp):
	".author <Author Name> -- Will return details regarding the author"
	# Gather every cached page whose creator matches the input exactly.
	# (Fixed: original line read "item for item in pagecache:", a syntax error.)
	authpages = [item for item in pagecache if item["created_by"] == inp]
	if not authpages:  # avoid ZeroDivisionError / IndexError below
		return inp + " has not created any pages."
	total = 0
	pagetotal = 0
	for page in authpages:
		total += page["rating"]
		pagetotal += 1
	# str() conversions fixed: the original concatenated ints onto strings.
	return inp + " has created " + str(pagetotal) + " pages. With an average rating of " + str(
		total / pagetotal) + ". Their most recently created page is " + authpages[-1]["title"]
	
	
	
pagecache = []

def refresh_cache():
	api = wikidotapi.connection()
	pages =  api.refresh_pages()
	for page in pages: