def buildMangaReplyWithAuthor(searchText, authorName, isExpanded, baseComment, blockTracking=False):
    """Build a Reddit reply for a manga request that names a specific author.

    Looks the series up on Anilist first; on a hit, cross-references MAL and
    Anime-Planet, otherwise falls back to Anime-Planet and MangaUpdates.
    Successful lookups are recorded in the request-tracking database unless
    tracking is blocked or the comment is on one of the bot's own subreddits.
    Returns the built comment, or None when anything goes wrong.
    """
    try:
        ani = Anilist.getMangaWithAuthor(searchText, authorName)
        mal = None
        mu = None
        ap = None

        if ani:
            mal = MAL.getMangaCloseToDescription(searchText, ani['description'])
            ap = AniP.getMangaURL(ani['title_english'], authorName)
        else:
            ap = AniP.getMangaURL(searchText, authorName)

        mu = MU.getMangaWithAuthor(searchText, authorName)

        if ani:
            try:
                titleToAdd = mal['title'] if mal is not None else ani['title_english']

                # BUG FIX: the original compared `str(...).lower` (the bound
                # method object, never called) with `is not`, which is always
                # True — so tracking was never suppressed on the bot's subs.
                subreddit = str(baseComment.subreddit).lower()
                if subreddit not in ('nihilate', 'roboragi') and not blockTracking:
                    DatabaseHandler.addRequest(
                        titleToAdd, 'Manga', baseComment.author.name, baseComment.subreddit)
            except Exception:
                # Tracking is best-effort; never let it break the reply.
                traceback.print_exc()

        return CommentBuilder.buildMangaComment(isExpanded, mal, ani, mu, ap)
    except Exception:
        traceback.print_exc()
        return None
def buildMangaReplyWithAuthor(searchText, authorName, message, isExpanded, blockTracking=False):
    """Build a Discord reply for a manga request that names a specific author.

    Mirrors the Reddit variant, but records requests against the Discord
    message's author ID and server ID. Returns the built comment, or None
    when anything goes wrong.
    """
    try:
        ani = Anilist.getMangaWithAuthor(searchText, authorName)
        mal = None
        mu = None
        ap = None

        if ani:
            mal = MAL.getMangaCloseToDescription(searchText, ani['description'])
            ap = AniP.getMangaURL(ani['title_english'], authorName)
        else:
            ap = AniP.getMangaURL(searchText, authorName)

        mu = MU.getMangaWithAuthor(searchText, authorName)

        if ani:
            try:
                titleToAdd = mal['title'] if mal is not None else ani['title_english']
                if not blockTracking:
                    DatabaseHandler.addRequest(
                        titleToAdd, 'Manga', message.author.id, message.server.id)
            except Exception:
                # BUG FIX: was a bare `except:`; narrowed so interpreter exits
                # (KeyboardInterrupt/SystemExit) are no longer swallowed.
                traceback.print_exc()

        return CommentBuilder.buildMangaComment(isExpanded, mal, ani, mu, ap)
    except Exception:
        traceback.print_exc()
        return None
async def on_message(message):
    """Handle one incoming Discord message.

    Valid messages (not our own, not already seen) are handed to
    process_message; everything else is recorded as seen, best effort.
    """
    print('Message recieved')

    if DiscordoragiSearch.isValidMessage(message):
        await process_message(message)
        return

    # Invalid message: remember it so we never look at it again.
    try:
        if not DatabaseHandler.messageExists(message.id):
            DatabaseHandler.addMessage(
                message.id, message.author.id, message.server.id, False)
    except Exception:
        traceback.print_exc()
def run(self):
    """Main worker loop: stream subreddit comments and reply to card requests.

    Also refreshes the TCG card list and sweeps recent submissions on their
    configured intervals. Returns when the comment stream itself raises
    (usually an expired Reddit access token).
    """
    try:
        print("Starting stream")
        commentStream = praw.helpers.comment_stream(
            self.reddit, self.subredditList, limit=1000, verbosity=0)

        for comment in commentStream:
            # Periodic maintenance, piggybacked on the stream (intervals in hours).
            if (time.time() - self.updateTime) > Config.tcgUpdateInterval * 60 * 60:
                DatabaseHandler.updateTCGCardlist()
                self.updateTime = time.time()

            if (time.time() - self.submissionsLastProcessed) > Config.submissionProcessingInterval * 60 * 60:
                self.submissionProcessor.processSubmissions(100)
                self.submissionsLastProcessed = time.time()

            # If we've already seen this comment, ignore it.
            if DatabaseHandler.commentExists(comment.id):
                continue

            # If the post has been deleted, getting the author raises.
            try:
                author = comment.author.name
            except Exception:
                continue

            # If this is one of our own comments, ignore it.
            if author == 'YugiohLinkBot':
                continue

            reply = self.requestHandler.buildResponse(comment.body)

            try:
                if reply:
                    # BUG FIX: regex is now a raw string; the original used
                    # invalid escape sequences like '\[' in a plain literal.
                    cards = re.findall(r'\[\*\*(.+?)\*\*\]\(', reply)
                    for card in cards:
                        DatabaseHandler.addRequest(card, author, comment.subreddit)

                    if "VENT THREAD" in comment.link_title:
                        reply = self.submissionProcessor.convertCase(True, reply)
                    elif "happiness thread" in comment.link_title:
                        reply = self.submissionProcessor.convertCase(False, reply)

                    DatabaseHandler.addComment(comment.id, author, comment.subreddit, True)
                    comment.reply(reply)
                    print("Comment made.\n")
                else:
                    if '{' in comment.body and '}' in comment.body:
                        print('')
                    DatabaseHandler.addComment(comment.id, author, comment.subreddit, False)
            except Exception as e:
                print("Reddit probably broke when replying:" + str(e) + '\n')
    except Exception as e:
        print("Error with comment stream: " + str(e))
        traceback.print_exc()
def buildMangaReply(searchText, isExpanded, baseComment):
    """Build a Reddit reply for a plain manga request.

    Basic breakdown:
    - If Anilist finds something, use it to find the MAL version.
    - If it hits either MAL or Ani, use it to find the MU version.
    - If it hits either, add it to the request-tracking DB.
    NSFW ("adult") entries are suppressed. Returns the built comment or None.
    """
    try:
        ani = Anilist.getMangaDetails(searchText)
        mal = None
        mu = None

        if ani is not None:
            mal = MAL.getMangaDetails(ani['title_romaji'])
        else:
            mal = MAL.getMangaDetails(searchText)
            if mal is not None:
                ani = Anilist.getMangaDetails(mal['title'])

        if (ani is not None) or (mal is not None):
            try:
                if mal is not None:
                    titleToAdd = mal['title']
                    mu = MU.getMangaURL(mal['title'])
                else:
                    titleToAdd = ani['title_english']
                    mu = MU.getMangaURL(ani['title_romaji'])

                # BUG FIX: the original tested `str(...).lower is not 'nihilate'`,
                # comparing an uncalled method object by identity (always True).
                # Call lower() and use a membership test.
                if str(baseComment.subreddit).lower() not in ('nihilate', 'roboragi'):
                    DatabaseHandler.addRequest(
                        titleToAdd, 'Manga', baseComment.author.name, baseComment.subreddit)
            except Exception:
                traceback.print_exc()

        if ani is not None and ani['adult'] is True:
            # Drop NSFW entries entirely.
            print("NSFW ENTRY")
            mal = None
            ani = None
            mu = None

        return CommentBuilder.buildMangaComment(isExpanded, mal, ani, mu)
    except Exception:
        traceback.print_exc()
        return None
def isValidMessage(message):
    """Return True when a Discord message should be processed.

    False when the message was already seen, or when it was sent by the bot
    itself (in which case it is also recorded as seen).
    """
    try:
        if DatabaseHandler.messageExists(message.id):
            return False
        try:
            if message.author.name == USERNAME:
                DatabaseHandler.addMessage(
                    message.id, message.author.id, message.server.id, False)
                return False
        except Exception:
            # BUG FIX: was a bare `except:`. Author lookup may fail; keep the
            # best-effort intent without trapping interpreter exits.
            pass
        return True
    except Exception:
        traceback.print_exc()
        return False
def isValidSubmission(submission):
    """Return True when a Reddit submission should be processed.

    False when it was already seen, or when it was posted by the bot itself
    (in which case it is also recorded as seen).
    """
    try:
        if DatabaseHandler.commentExists(submission.id):
            return False
        try:
            if submission.author.name == 'Roboragi':
                DatabaseHandler.addComment(
                    submission.id, submission.author.name, submission.subreddit, False)
                return False
        except Exception:
            # BUG FIX: was a bare `except:`. Author lookup fails on deleted
            # posts; treat those as valid, but don't trap interpreter exits.
            pass
        return True
    except Exception:
        traceback.print_exc()
        return False
def isValidComment(comment, reddit):
    """Return True when a Reddit comment should be processed.

    False when it was already seen, or when it was made by the bot itself
    (in which case it is also recorded as seen). `reddit` is accepted for
    interface compatibility with callers; it is not used here.
    """
    try:
        if DatabaseHandler.commentExists(comment.id):
            return False
        try:
            if comment.author.name == USERNAME:
                DatabaseHandler.addComment(
                    comment.id, comment.author.name, comment.subreddit, False)
                return False
        except Exception:
            # BUG FIX: was a bare `except:`. Author lookup fails on deleted
            # comments; treat those as valid, but don't trap interpreter exits.
            pass
        return True
    except Exception:
        traceback.print_exc()
        return False
def __init__(self, consumer_key, consumer_secret, access_key, access_secret, database, server):
    """Store the Twitter credentials and open the user database handler."""
    # Twitter OAuth credentials.
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.access_key = access_key
    self.access_secret = access_secret
    # Database location, plus the handler used for all user lookups.
    self.database = database
    self.server = server
    self.db_users = DatabaseHandler(database, server)
class StoreUser(object):
    """Tracks which Twitter users have been harvested, backed by a database.

    Each user is stored as a document keyed by username with a running
    `num_tweets` counter.
    """

    def __init__(self, consumer_key, consumer_secret, access_key, access_secret, database, server):
        """Store the Twitter credentials and open the user database handler."""
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_key = access_key
        self.access_secret = access_secret
        self.database = database
        self.server = server
        self.db_users = DatabaseHandler(self.database, self.server)

    def save_user(self, username):
        """Insert a new user document, or bump num_tweets if it already exists."""
        # BUG FIX: the original built a JSON string by concatenation and
        # parsed it back with json.loads, which broke for usernames containing
        # '"' or '\'. Build the document dict directly instead.
        user = {"_id": username, "num_tweets": 1, "harvested": "true"}
        try:
            self.db_users.save(user)
        except couchdb.http.ResourceConflict:
            # Document already exists: re-read it and increment the counter.
            user = self.db_users.get_row(user['_id'])
            user['num_tweets'] += 1
            self.db_users.save(user)

    def exists(self, username):
        """Return True when a document for *username* is present."""
        try:
            return self.db_users.get_row(username) is not None
        except Exception:
            # BUG FIX: was a bare `except:`; narrowed to Exception.
            return False
def formatCardData(card, isExpanded):
    """Render a card dict into the appropriate reply template.

    Expanded replies include level/type/attribute/ATK-DEF details plus request
    stats from the database; normal replies only include name, image, wikia
    and price links. Monster and spell/trap cards use different templates.
    """
    # Fallback image used when the card has no image URL of its own.
    default_image = '(http://i.imgur.com/paNkvJ5.jpg)'

    if isExpanded:
        requestStats = DatabaseHandler.getStats(card['name'])
        # The stats footer is identical for both expanded templates, so
        # build it once.
        stats = 'Stats: {total} requests - {percentage}% of all requests'.format(
            total=requestStats['total'],
            percentage=str(round(requestStats['totalAsPercentage'], 2)))

        if card['cardtype'].lower() == 'monster':
            return MONSTER_CARD_TEMPLATE_EXPANDED.format(
                name='[**{}**]'.format(card['name']),
                image='({})'.format(card['image']) if card['image'] else default_image,
                wikia='[Wikia]({})'.format(card['wikia']),
                infosyntax=', ' if card['pricedata'] else '',
                pricedata='[($)]({})'.format(card['pricedata']) if card['pricedata'] else '',
                leveltype='{}: '.format(card['leveltype']),
                level='{}, '.format(card['level']),
                cardtype='Category: {}, '.format(card['cardtype'].title()),
                # FIX: join the type names directly; the original enumerated
                # the list and indexed each (index, value) pair for no reason.
                types='Type: {}, '.format(' / '.join(str(t) for t in card['types'])),
                attribute='Attribute: {}'.format(card['attribute'].upper()),
                text='>{}'.format(card['text']),
                att='>ATK: {}, '.format(card['att']),
                defn='DEF: {}'.format(card['def']),
                stats=stats)
        else:
            return SPELL_CARD_TEMPLATE_EXPANDED.format(
                name='[**{}**]'.format(card['name']),
                image='({})'.format(card['image']) if card['image'] else default_image,
                wikia='[Wikia]({})'.format(card['wikia']),
                infosyntax=', ' if card['pricedata'] else '',
                pricedata='[($)]({})'.format(card['pricedata']) if card['pricedata'] else '',
                cardtype='Category: {}, '.format(card['cardtype'].title()),
                cardproperty='Property: {}'.format(card['property']),
                text='>{}'.format(card['text']),
                stats=stats)
    else:
        if card['cardtype'].lower() == 'monster':
            return MONSTER_CARD_TEMPLATE_NORMAL.format(
                name='[**{}**]'.format(card['name']),
                image='({})'.format(card['image']) if card['image'] else default_image,
                wikia='[Wikia]({})'.format(card['wikia']),
                infosyntax=', ' if card['pricedata'] else '',
                pricedata='[($)]({})'.format(card['pricedata']) if card['pricedata'] else '')
        else:
            return SPELL_CARD_TEMPLATE_NORMAL.format(
                name='[**{}**]'.format(card['name']),
                image='({})'.format(card['image']) if card['image'] else default_image,
                wikia='[Wikia]({})'.format(card['wikia']),
                infosyntax=', ' if card['pricedata'] else '',
                pricedata='[($)]({})'.format(card['pricedata']) if card['pricedata'] else '')
def start():
    """Stream comments from the watched subreddits and process each one.

    Also checks the bot's PMs every TIME_BETWEEN_PM_CHECKS seconds. Runs
    until the stream raises (usually an expired Reddit access token).
    """
    print('Starting comment stream:')
    last_checked_pms = time.time()

    # This opens a constant stream of comments. It will loop until there's a
    # major error (usually the Reddit access token needs refreshing).
    comment_stream = praw.helpers.comment_stream(
        reddit, SUBREDDITLIST, limit=250, verbosity=0)

    for comment in comment_stream:
        # Check if it's time to check the PMs.
        if (time.time() - last_checked_pms) > TIME_BETWEEN_PM_CHECKS:
            process_pms()
            last_checked_pms = time.time()

        # Is the comment valid (i.e. not made by Roboragi and not seen
        # already)? If not, record it as seen (best effort) and skip it.
        if not Search.isValidComment(comment, reddit):
            try:
                if not DatabaseHandler.commentExists(comment.id):
                    DatabaseHandler.addComment(
                        comment.id, comment.author.name, comment.subreddit, False)
            except Exception:
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; keep the best-effort intent
                # without trapping interpreter exits.
                pass
            continue

        process_comment(comment)
def process_pms():
    """Check unread PMs for username mentions containing {...}/<...> requests.

    An edit request on a mentioned comment is answered either by editing the
    bot's existing reply under that comment or by posting a new reply.
    """
    for msg in reddit.get_unread(limit=None):
        if msg.subject == 'username mention':
            # BUG FIX: the original tested `('{' and '}') in msg.body`, which
            # evaluates to just `'}' in msg.body` and never checked the
            # opening bracket. Check both delimiters of each pair.
            if ('{' in msg.body and '}' in msg.body) or ('<' in msg.body and '>' in msg.body):
                try:
                    if str(msg.subreddit).lower() in exiled:
                        print('Edit request from exiled subreddit: ' + str(msg.subreddit) + '\n')
                        msg.mark_as_read()
                        continue

                    mentionedComment = reddit.get_info(thing_id=msg.name)
                    mentionedComment.refresh()

                    if not DatabaseHandler.commentExists(mentionedComment.id):
                        # Mentions inside subreddits we already stream are
                        # handled by the comment stream instead.
                        if str(mentionedComment.subreddit).lower() in Config.subredditlist:
                            continue

                        replies = mentionedComment.replies
                        ownComments = []
                        commentToEdit = None

                        for reply in replies:
                            if reply.author.name == 'Roboragi':
                                ownComments.append(reply)

                        # Our editable reply is the one carrying the wiki footer.
                        for comment in ownComments:
                            if 'http://www.reddit.com/r/Roboragi/wiki/index' in comment.body:
                                commentToEdit = comment

                        commentReply = process_comment(mentionedComment, True)

                        try:
                            if commentReply:
                                if commentToEdit:
                                    commentToEdit.edit(commentReply)
                                    print('Comment edited.\n')
                                else:
                                    mentionedComment.reply(commentReply)
                                    print('Comment made.\n')
                        except praw.errors.Forbidden:
                            # BUG FIX: corrected log typo ('Edit equest').
                            print('Edit request from banned subreddit: ' + str(msg.subreddit) + '\n')

                        msg.mark_as_read()
                except Exception as e:
                    print(e)
def processSubmissions(self, num):
    """Scan the newest *num* submissions and reply to any card requests.

    Mirrors the comment-stream handler: skips seen/deleted/own posts,
    records fulfilled requests, and logs the outcome in the comment table.
    """
    subreddits = self.reddit.get_subreddit(self.subredditList)

    for submission in subreddits.get_new(limit=num):
        # If we've already seen this submission, ignore it.
        if DatabaseHandler.commentExists(submission.id):
            continue

        # If the post has been deleted, getting the author raises.
        try:
            author = submission.author.name
        except Exception:
            continue

        # If this is one of our own submissions, ignore it.
        if author == 'YugiohLinkBot':
            continue

        reply = self.requestHandler.buildResponse(submission.selftext)

        try:
            if reply:
                # BUG FIX: regex is now a raw string; the original used
                # invalid escape sequences like '\[' in a plain literal.
                cards = re.findall(r'\[\*\*(.+?)\*\*\]\(', reply)
                for card in cards:
                    DatabaseHandler.addRequest(card, author, submission.subreddit)

                if "VENT THREAD" in submission.title:
                    reply = self.convertCase(True, reply)
                elif "happiness thread" in submission.title:
                    reply = self.convertCase(False, reply)

                DatabaseHandler.addComment(submission.id, author, submission.subreddit, True)
                submission.add_comment(reply)
                print("Comment made.\n")
            else:
                if '{' in submission.selftext and '}' in submission.selftext:
                    print('')
                DatabaseHandler.addComment(submission.id, author, submission.subreddit, False)
        except Exception as e:
            traceback.print_exc()
            print("Reddit probably broke when replying:" + str(e) + '\n')
class StoreUser(object):
    """Tracks which Twitter users have been harvested, backed by a database."""

    # function: constructor
    # return: none
    # description: create connection with Twitter API
    def __init__(self, consumer_key, consumer_secret, access_key, access_secret, database, server):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_key = access_key
        self.access_secret = access_secret
        self.database = database
        self.server = server
        self.db_users = DatabaseHandler(self.database, self.server)

    # function: save_user
    # return: None
    # description: save new user or update an existing user with the number of gathered tweets
    def save_user(self, username):
        # BUG FIX: the original built a JSON string by concatenation and
        # parsed it back with json.loads, which broke for usernames containing
        # '"' or '\'. Build the document dict directly instead.
        user = {"_id": username, "num_tweets": 1, "harvested": "true"}
        try:
            self.db_users.save(user)
        except couchdb.http.ResourceConflict:
            # Document already exists: re-read it and increment the counter.
            user = self.db_users.get_row(user['_id'])
            user['num_tweets'] += 1
            self.db_users.save(user)

    # function: exists
    # return: boolean
    # description: Verify if a user exists in the database or not
    def exists(self, username):
        try:
            return self.db_users.get_row(username) is not None
        except Exception:
            # BUG FIX: was a bare `except:`; narrowed to Exception.
            return False
def buildStatsComment(subreddit):
    """Build the markdown reply for a {stats} request.

    Assembles an "Overall Stats" section from the global request totals,
    then a "This Subreddit" section from the per-subreddit totals (or a
    placeholder when the subreddit has no recorded requests).
    Returns the comment text, or None on any error.
    """
    try:
        statComment = ''
        subreddit = str(subreddit)
        basicStats = DatabaseHandler.getBasicStats()
        subredditStats = DatabaseHandler.getSubredditStats(subreddit.lower())

        # The overall stats section
        statComment += '**Overall Stats**\n\n'
        statComment += '/u/Roboragi has searched through ' + str(basicStats['totalComments'])
        statComment += ' unique comments and submissions and fulfilled a total of ' + str(basicStats['total'])
        statComment += ' requests across ' + str(basicStats['uniqueSubreddits']) + ' unique subreddit(s). '
        statComment += 'A total of ' + str(basicStats['uniqueNames'])
        statComment += ' unique shows have been requested, with a mean value of ' + str(round(basicStats['meanValuePerRequest'],3))
        statComment += ' requests/show and a standard deviation of ' + str(round(basicStats['standardDeviation'], 3)) + '.'

        statComment += '\n\n'

        statComment += 'The most frequently requested anime/manga overall are: \n'
        # Each entry appears to be (name, type, count) — TODO confirm against
        # DatabaseHandler.getBasicStats.
        for i, request in enumerate(basicStats['topRequests']):
            statComment += str(i + 1) + '. ' + str(request[0]) + ' (' + str(request[1]) + ' - ' + str(request[2]) + ' requests) \n'

        statComment += '\n'

        statComment += 'The most frequent requesters overall are: \n'
        for i, requester in enumerate(basicStats['topRequesters']):
            statComment += str(i + 1) + '. /u/' + str(requester[0]) + ' (' + str(requester[1]) + ' requests) \n'

        # The subreddit specific section
        statComment += ' \n**This Subreddit**\n\n'

        if not (subredditStats is None):
            statComment += '/u/Roboragi has searched through ' + str(subredditStats['totalComments'])
            statComment += ' unique comments and submissions on /r/' + subreddit
            statComment += ' and fulfilled a total of ' + str(subredditStats['total']) + ' requests, '
            statComment += 'representing ' + str(round(subredditStats['totalAsPercentage'], 2)) + '% of all requests. '
            statComment += 'A total of ' + str(subredditStats['uniqueNames']) + ' unique shows have been requested here, '
            statComment += 'with a mean value of ' + str(round(subredditStats['meanValuePerRequest'], 3)) + ' requests/show'
            statComment += ' and a standard deviation of ' + str(round(subredditStats['standardDeviation'], 3)) + '.'

            statComment += '\n\n'

            statComment += 'The most frequently requested anime/manga on this subreddit are: \n'
            for i, request in enumerate(subredditStats['topRequests']):
                statComment += str(i + 1) + '. ' + str(request[0]) + ' (' + str(request[1]) + ' - ' + str(request[2]) + ' requests) \n'

            statComment += '\n'

            statComment += 'The most frequent requesters on this subreddit are: \n'
            for i, requester in enumerate(subredditStats['topRequesters']):
                statComment += str(i + 1) + '. /u/' + str(requester[0]) + ' (' + str(requester[1]) + ' requests) \n'
        else:
            statComment += 'There have been no requests on /r/' + str(subreddit) + ' yet.'

        receipt = '(S) Request successful: Stats'
        print(receipt)

        return statComment
    except:
        # NOTE(review): bare except — also catches KeyboardInterrupt/SystemExit;
        # consider narrowing to Exception.
        traceback.print_exc()
        return None
def buildAnimeComment(isExpanded, ani, ap, kit):
    """Build the markdown reply for an anime request.

    Merges data from Anilist (`ani`), Anime-Planet (`ap`, a URL) and Kitsu
    (`kit`), preferring Anilist fields and filling gaps from Kitsu. Returns
    a dict with 'title' and 'comment' keys (kept separate so duplicate
    requests in one comment can be detected), or None on any error.
    """
    try:
        comment = ''

        # Fields collected from whichever sources provide them.
        title = None
        jTitle = None          # Japanese title
        cType = None           # media type (TV, Movie, ...)
        malURL = None
        aniURL = None
        apURL = ap
        status = None
        episodes = None
        genres = []
        countdown = None       # seconds until the next episode airs
        nextEpisode = None
        desc = None
        release_year = None

        if ani:
            aniURL = 'http://anilist.co/anime/' + str(ani['id'])
            # MAL link is derived from Anilist's cross-reference id.
            malURL = 'http://myanimelist.net/anime/' + str(ani['id_mal']) if ani['id_mal'] else None
            title = ani['title_romaji'] if 'title_romaji' in ani else ani['title_english']
            desc = ani['description'] if 'description' in ani else None
            status = ani['airing_status'].title() if 'airing_status' in ani else None
            cType = ani['type'] if 'type' in ani else None
            jTitle = ani['title_japanese'] if 'title_japanese' in ani else None
            genres = ani['genres'] if 'genres' in ani else None

            try:
                # start_date_fuzzy appears to start with YYYY — TODO confirm.
                year_str = str(ani['start_date_fuzzy']) if 'start_date_fuzzy' in ani else None
                if year_str:
                    release_year = year_str[:4]
            except:
                pass

            episodes = ani['total_episodes'] if 'total_episodes' in ani else None
            if episodes == 0:
                # Zero means "unknown" upstream; normalise to None.
                episodes = None

            if ani['airing']:
                countdown = ani['airing']['countdown']
                nextEpisode = ani['airing']['next_episode']

        if kit:
            # Kitsu only fills in fields Anilist did not provide.
            kitURL = kit['url']
            if not title:
                title = kit['title_romaji'] if kit['title_romaji'] else kit['title_english']
            if not desc:
                desc = kit['description'] if 'description' in kit else None
            if not cType:
                cType = kit['type'].title() if 'type' in kit else None

            try:
                year_str = str(kit['startDate']) if 'startDate' in kit else None
                if year_str:
                    release_year = year_str[:4]
            except:
                pass

            if not episodes:
                episodes = kit['episode_count'] if 'episode_count' in kit else None
                if episodes == 0:
                    episodes = None

        stats = DatabaseHandler.getRequestStats(title, 'Anime')

        # ---------- BUILDING THE COMMENT ----------#

        # ----- TITLE -----#
        comment += '**' + title.strip() + '** - ('

        # ----- LINKS -----#
        urlComments = []
        if ani is not None:
            urlComments.append('[AL](' + sanitise_url_for_markdown(aniURL) + ')')
        if apURL is not None:
            urlComments.append('[A-P](' + sanitise_url_for_markdown(apURL) + ')')
        if kit is not None:
            urlComments.append('[KIT](' + sanitise_url_for_markdown(kitURL) + ')')
        if malURL:
            urlComments.append('[MAL](' + sanitise_url_for_markdown(malURL) + ')')

        # NOTE(review): `i is not 0` relies on CPython small-int interning;
        # `i != 0` would be the safe spelling.
        for i, link in enumerate(urlComments):
            if i is not 0:
                comment += ', '
            comment += link
        comment += ')'

        # ----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                # Superscript each word of the Japanese title individually.
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += '^^' + word

        # ----- INFO LINE -----#
        if (isExpanded):
            comment += '\n\n^('
            if cType:
                comment += '**' + cType + '** | '
            if release_year:
                comment += '**' + release_year + '**'
            if status:
                comment += ' | **Status:** ' + status
            if cType != 'Movie' and episodes:
                comment += ' | **Episodes:** ' + str(episodes)
        else:
            comment += '\n\n^('
            if cType:
                comment += cType
            if status:
                comment += ' | Status: ' + status
            if cType != 'Movie' and episodes:
                comment += ' | Episodes: ' + str(episodes)

        if genres:
            if (isExpanded):
                comment += ' | **Genres:** '
            else:
                comment += ' | Genres: '
            for i, genre in enumerate(genres):
                if i is not 0:
                    comment += ', '
                comment += genre

        comment += ')'

        if (isExpanded) and (stats is not None):
            comment += ' \n^(**Stats:** ' + str(stats['total']) + ' requests across ' + str(stats['uniqueSubreddits']) + ' subreddits - ' + str(round(stats['totalAsPercentage'], 3)) + '% of all requests)'

        # ----- EPISODE COUNTDOWN -----#
        if (countdown is not None) and (nextEpisode is not None):
            # Link the air time to timeanddate.com using the UTC timestamp.
            current_utc_time = datetime.datetime.utcnow()
            air_time_in_utc = current_utc_time + datetime.timedelta(0, countdown)
            formatted_time = air_time_in_utc.strftime('%Y%m%dT%H%M')

            # countdown is given to us in seconds
            days, countdown = divmod(countdown, 24 * 60 * 60)
            hours, countdown = divmod(countdown, 60 * 60)
            minutes, countdown = divmod(countdown, 60)

            comment += '\n\n^[Episode ' + str(nextEpisode) + ' airs in ' + str(days) + ' days, ' + str(hours) + ' hours, ' + str(minutes) + ' minutes](https://www.timeanddate.com/worldclock/fixedtime.html?iso=' + formatted_time + ')'

        # ----- DESCRIPTION -----#
        if (isExpanded):
            comment += '\n\n' + cleanupDescription(desc)

        # ----- END -----#
        receipt = '(A) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if apURL is not None:
            receipt += 'AP '
        if ani is not None:
            receipt += 'AL '
        if kit is not None:
            receipt += 'KIT '
        print(receipt)

        # We return the title/comment separately so we can track if multiples of the same comment have been requests (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment

        return dictToReturn
    except:
        traceback.print_exc()
        return None
def buildMangaComment(isExpanded, ani, mu, ap, kit):
    """Build the markdown reply for a manga request.

    Merges data from Anilist (`ani`), MangaUpdates (`mu`, a URL),
    Anime-Planet (`ap`, a URL) and Kitsu (`kit`), preferring Anilist fields
    and filling gaps from Kitsu. Returns a dict with 'title' and 'comment'
    keys, or None on any error (silently — the traceback call is commented
    out below).
    """
    try:
        comment = ''

        # Fields collected from whichever sources provide them.
        title = None
        jTitle = None          # Japanese title
        cType = None           # media type (Manga, Novel, ...)
        malURL = None
        aniURL = None
        muURL = mu
        apURL = ap
        kitURL = None
        status = None
        chapters = None
        volumes = None
        genres = []
        desc = None

        if ani:
            aniURL = 'http://anilist.co/manga/' + str(ani['id'])
            # MAL link is derived from Anilist's cross-reference id.
            malURL = 'http://myanimelist.net/manga/' + str(ani['id_mal']) if ani['id_mal'] else None
            title = ani['title_romaji'] if 'title_romaji' in ani else ani['title_english']
            desc = ani['description'] if 'description' in ani else None
            status = ani['publishing_status'].title() if 'publishing_status' in ani else None
            cType = ani['type'] if 'type' in ani else None
            jTitle = ani['title_japanese'] if 'title_japanese' in ani else None
            genres = ani['genres'] if 'genres' in ani else None

            chapters = ani['total_chapters'] if 'total_chapters' in ani else None
            if chapters == 0:
                # Zero means "unknown" upstream; normalise to None.
                chapters = None
            volumes = ani['total_volumes'] if 'total_volumes' in ani else None
            if volumes == 0:
                volumes = None

        if kit:
            # Kitsu only fills in fields Anilist did not provide.
            kitURL = kit['url']
            if not title:
                title = kit['title_romaji'] if kit['title_romaji'] else kit['title_english']
            if not desc:
                desc = kit['description'] if 'description' in kit else None
            if not cType:
                cType = kit['type'].title() if 'type' in kit else None
            if not chapters:
                chapters = kit['chapter_count'] if 'chapter_count' in kit else None
                if chapters == 0:
                    chapters = None
            if not volumes:
                volumes = kit['volume_count'] if 'volume_count' in kit else None
                if volumes == 0:
                    volumes = None

        stats = DatabaseHandler.getRequestStats(title, 'Manga')

        # ---------- BUILDING THE COMMENT ----------#

        # ----- TITLE -----#
        comment += '**' + title.strip() + '** - ('

        # ----- LINKS -----#
        urlComments = []
        if aniURL is not None:
            urlComments.append('[AL](' + sanitise_url_for_markdown(aniURL) + ')')
        if apURL is not None:
            urlComments.append('[A-P](' + sanitise_url_for_markdown(apURL) + ')')
        if kitURL is not None:
            urlComments.append('[KIT](' + sanitise_url_for_markdown(kitURL) + ')')
        if muURL is not None:
            urlComments.append('[MU](' + sanitise_url_for_markdown(muURL) + ')')
        if malURL:
            urlComments.append('[MAL](' + sanitise_url_for_markdown(malURL) + ')')

        # NOTE(review): `i is not 0` relies on CPython small-int interning;
        # `i != 0` would be the safe spelling.
        for i, link in enumerate(urlComments):
            if i is not 0:
                comment += ', '
            comment += link
        comment += ')'

        # ----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                # Superscript each word of the Japanese title individually.
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += '^^' + word

        # ----- INFO LINE -----#
        # NOTE(review): the `str(...) is not 'Unknown'` checks below compare
        # by object identity, which is effectively always True for a freshly
        # built str — `!=` is almost certainly what was meant. Left untouched
        # here; flagged for a behavioral fix.
        if (isExpanded):
            comment += '\n\n^('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += '**' + cType + '**'
            if status:
                comment += ' | **Status:** ' + status
            if (cType != 'Light Novel'):
                if volumes and str(volumes) is not 'Unknown':
                    comment += ' | **Volumes:** ' + str(volumes)
                if chapters and str(chapters) is not 'Unknown':
                    comment += ' | **Chapters:** ' + str(chapters)
            else:
                # Light novels only show volumes, never chapters.
                if volumes and str(volumes) is not 'Unknown':
                    comment += ' | **Volumes:** ' + str(volumes)
        else:
            comment += '\n\n^('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += cType
            if status:
                comment += ' | Status: ' + status
            if (cType != 'Light Novel'):
                if volumes and str(volumes) is not 'Unknown':
                    comment += ' | Volumes: ' + str(volumes)
                if chapters and str(chapters) is not 'Unknown':
                    comment += ' | Chapters: ' + str(chapters)
            else:
                if volumes and str(volumes) is not 'Unknown':
                    comment += ' | Volumes: ' + str(volumes)

        if genres:
            if (isExpanded):
                comment += ' | **Genres:** '
            else:
                comment += ' | Genres: '
            for i, genre in enumerate(genres):
                if i is not 0:
                    comment += ', '
                comment += genre

        comment += ')'

        if (isExpanded) and (stats is not None):
            comment += ' \n^(**Stats:** ' + str(stats['total']) + ' requests across ' + str(stats['uniqueSubreddits']) + ' subreddits - ' + str(round(stats['totalAsPercentage'], 3)) + '% of all requests)'

        # ----- DESCRIPTION -----#
        if (isExpanded):
            comment += '\n\n' + cleanupDescription(desc)

        # ----- END -----#
        receipt = '(M) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if ap is not None:
            receipt += 'AP '
        if ani is not None:
            receipt += 'AL '
        if kit is not None:
            receipt += 'KIT '
        if muURL is not None:
            receipt += 'MU '
        print(receipt)

        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment

        return dictToReturn
    except:
        # traceback.print_exc()
        return None
def RetrieveModifiedFilesWorker(scripts, splash, networkTransferWindow, sendWindowCloseSignal):
    """Sync locally cached database files against the remote FTP server.

    Flushes local storage to disk, downloads the remote ChangeLog, diffs it
    against the local one, and re-downloads every file mentioned in new
    changelog entries (skipping files with unsaved local edits). Holds the
    database access lock for the whole operation; FTP failures are retried
    up to 20 times before giving up.
    """
    scripts.WriteDatabaseStorageToHdd()
    Globals.Cache.databaseAccessRLock.acquire()

    # Nab the online changelog
    try:
        # splash may be a dummy/absent UI object — ignore failures to update it.
        splash.text = 'Downloading updated files...'
    except:
        pass

    # loop to prevent crashes during FTP stuff
    for i in range( 0, 20 ):  # range( start, stop, step )
        try:
            try:
                # try to connect to the FTP
                ftp = ConnectToFtp()
                ftp.cwd('/')
                ftp.cwd(Globals.configData.RemoteDatabasePath)
            except:
                # if FTP conn fails 3 times assume it doesn't work at all and just cancel
                if i > 2:
                    networkTransferWindow.addListEntry("Couldn't connect to FTP Server. Databases may not be up-to-date.", "< Error >")
                    try:
                        # NOTE(review): this .format() call has no placeholders
                        # in the string, so both arguments are silently ignored
                        # — compare the success message at the bottom. Likely a
                        # copy-paste remnant; confirm intended text.
                        splash.text = 'Grace Note Loaded'.format(scripts.roletext[scripts.role], Globals.ModeFlag)
                        splash.complete = True
                        splash.offline = True
                    except:
                        pass
                    if sendWindowCloseSignal:
                        networkTransferWindow.allowCloseSignal.emit(False)
                    Globals.Cache.databaseAccessRLock.release()
                    return
                networkTransferWindow.addListEntry("Couldn't connect to FTP Server, retrying...", "< Error >")
                continue

            # get new changelog
            transferWindowChangeLogIdx = networkTransferWindow.addListEntry("Downloading...", "ChangeLog")
            changes = DownloadFile(scripts, ftp, 'ChangeLog', 'NewChangeLog')
            if not changes:
                # Changelog download failed: abort, leaving the close signal blocked.
                networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "Failed, please retry.")
                if sendWindowCloseSignal:
                    networkTransferWindow.allowCloseSignal.emit(False)
                Globals.Cache.databaseAccessRLock.release()
                return
            networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "Complete!")

            # Get any new entries
            LogSet = DatabaseHandler.GetChangelogData()
            newLogSet = DatabaseHandler.GetNewChangelogData()
            # Entries present in exactly one of the two logs are the new ones.
            DownloaderSet = LogSet.symmetric_difference(newLogSet)
            Downloader = []
            for item in DownloaderSet:
                # Each entry's second field is a comma-separated file list —
                # presumably (filename list per changelog row); confirm format.
                itemList = item[1].split(',')
                for subitem in itemList:
                    Downloader.append(subitem)

            # remove possible duplicates from list, so it doesn't download the same file multiple times
            Downloader = list(set(Downloader))

            FilesToDownload = []
            # Don't download stuff that still has unsaved changes locally
            for item in Downloader:
                if item in scripts.update:
                    networkTransferWindow.addListEntry("Not downloading, still has unsaved local changes.", item)
                else:
                    transferWindowIdx = networkTransferWindow.addListEntry("Waiting...", item)
                    FilesToDownload.append((item, transferWindowIdx))

            # Download the files that have been changed
            for item, transferWindowIdx in FilesToDownload:
                networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Downloading...")
                DownloadDatabaseAndClean(scripts, ftp, item)
                networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Complete!")

            ftp.close()

            # Copy new change log over old
            Globals.CopyFile( Globals.configData.LocalDatabasePath + '/NewChangeLog', Globals.configData.LocalDatabasePath + '/ChangeLog' )
            break
        except ftplib.all_errors:
            # Transient FTP failure mid-transfer: retry the whole pass.
            if i == 19:
                networkTransferWindow.addListEntry("Error during FTP transfer. Databases may not be up-to-date.", "< Error >")
                break
            networkTransferWindow.addListEntry("Error during FTP transfer, retrying...", "< Error >")
            continue

    # Success (or retries exhausted after partial work): update the splash,
    # release the lock and let the transfer window close.
    try:
        splash.text = 'Grace Note now {0} in {1} Mode'.format(scripts.roletext[scripts.role], Globals.ModeFlag)
        splash.complete = True
    except:
        pass
    Globals.Cache.databaseAccessRLock.release()
    if sendWindowCloseSignal:
        networkTransferWindow.allowCloseSignal.emit(True)
    return
#if(recommend.email == '*****@*****.**'): recommend.recommended_data[0].influencer.append("AMAN PRAJAPATI") recommend.recommended_data[0].influencer.append("AJAY DUBEY") recommend.recommended_data[0].influencer.append("RAKESH DHAR") Log.log(recommend.get_dict()) return recommend.__dict__ @staticmethod def __matching(subject, object): similarity_city = (1 - abs(subject.tier_city - object.tier_city) / 3.0) similarity_travel = (1 - abs(subject.count_travel - object.count_travel) / 100.0) similarity_auto = (1 - abs(subject.count_auto - object.count_auto) / 100.0) similarity_pol = (1 - abs(subject.count_pol - object.count_pol) / 100.0) similarity_shop = (1 - abs(subject.count_shop - object.count_shop) / 100.0) similarity = 1.0 * similarity_auto * similarity_city * similarity_pol * similarity_shop * similarity_travel * similarity_city Log.log("similarity",similarity) return similarity if __name__ =="__main__": data = DatabaseHandler.get_random_Detail() Recommender.get_recommendation(data)
def buildAnimeComment(isExpanded, mal, hb, ani, ap, anidb):
    """Build a Markdown comment for an anime request.

    Merges data from MAL, Hummingbird (hb) and Anilist (ani) dicts plus
    pre-built Anime-Planet (ap) and AniDB (anidb) URLs. Later sources
    (Anilist, then Hummingbird) override earlier ones for title/desc/status.

    Returns a dict {'title': ..., 'comment': ...} so the caller can track
    duplicate requests, or None on any failure.
    """
    try:
        comment = ''

        title = None
        jTitle = None
        cType = None

        malURL = None
        hbURL = None
        aniURL = None
        apURL = ap
        anidbURL = anidb
        youtubeTrailer = None

        status = None
        episodes = None
        genres = []

        countdown = None
        nextEpisode = None

        desc = None

        if mal:
            desc = mal['synopsis']
            if mal['type']:
                cType = mal['type']
            malURL = 'http://myanimelist.net/anime/' + str(mal['id'])

        if ani is not None:
            title = ani['title_romaji']
            aniURL = 'http://anilist.co/anime/' + str(ani['id'])
            try:
                cType = ani['type']
                desc = ani['description']
            except:
                pass
            status = ani['airing_status'].title()

            # Optional Anilist fields; a partial payload is tolerated.
            try:
                if ani['title_japanese'] is not None:
                    jTitle = ani['title_japanese']
                if ani['youtube_id'] is not None:
                    youtubeTrailer = ani['youtube_id']
                if ani['total_episodes'] is not None:
                    if ani['total_episodes'] == 0:
                        episodes = 'Unknown'
                    else:
                        episodes = ani['total_episodes']
                if ani['genres'] is not None:
                    genres = ani['genres']
                if ani['airing'] is not None:
                    countdown = ani['airing']['countdown']
                    nextEpisode = ani['airing']['next_episode']
            except:
                print('No full details for Anilist')

        if hb is not None:
            title = hb['title']
            desc = hb['synopsis']
            status = hb['status']
            if hb['show_type']:
                cType = hb['show_type']
            hbURL = hb['url']
            if hb['mal_id'] and not malURL:
                malURL = 'http://myanimelist.net/anime/' + str(hb['mal_id'])
            if (hb['genres'] is not None) and (not genres):
                for genre in hb['genres']:
                    genres.append(genre['name'])
            if (hb['episode_count'] is not None):
                episodes = hb['episode_count']
            else:
                episodes = 'Unknown'

        # Fix: the original issued an extra getRequestStats(title, False)
        # whose result was unconditionally overwritten below — one wasted
        # database round-trip per request. Only the needed call remains.
        if ani is None:
            stats = DatabaseHandler.getRequestStats(hb['title'], False)
        else:
            stats = DatabaseHandler.getRequestStats(ani['title_romaji'], False)

        #---------- BUILDING THE COMMENT ----------#

        #----- TITLE -----#
        comment += '**' + title.strip() + '** - ('

        #----- LINKS -----#
        urlComments = []
        if malURL is not None:
            urlComments.append('[MAL](' + malURL + ')')
        if apURL is not None:
            urlComments.append('[A-P](' + apURL + ')')
        if hb is not None:
            urlComments.append('[HB](' + hbURL + ')')
        if ani is not None:
            urlComments.append('[ANI](' + aniURL + ')')
        if anidbURL is not None:
            urlComments.append('[ADB](' + anidbURL + ')')

        for i, link in enumerate(urlComments):
            # Fix: was `i is not 0` — identity test on an int literal,
            # which is CPython-implementation-defined. Use equality.
            if i != 0:
                comment += ', '
            comment += link

        comment += ')'

        #----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += '^^' + word

        #----- INFO LINE -----#
        if (isExpanded):
            comment += '\n\n^('
            if cType:
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if cType != 'Movie':
                comment += ' | **Episodes:** ' + str(episodes)
            comment += ' | **Genres:** '
        else:
            comment += '\n\n^('
            if cType:
                comment += cType + ' | '
            comment += 'Status: ' + status
            if cType != 'Movie':
                comment += ' | Episodes: ' + str(episodes)
            comment += ' | Genres: '

        if not (genres == []):
            for i, genre in enumerate(genres):
                if i != 0:
                    comment += ', '
                comment += genre
        else:
            comment += 'None'

        if (isExpanded) and (stats is not None):
            comment += ' \n**Stats:** ' + str(stats['total']) + ' requests across ' + str(stats['uniqueSubreddits']) + ' subreddit(s)^) ^- ^' + str(round(stats['totalAsPercentage'],3)) + '% ^of ^all ^requests'
        else:
            comment += ')'

        #----- EPISODE COUNTDOWN -----#
        if (countdown is not None) and (nextEpisode is not None):
            #countdown is given to us in seconds
            days, countdown = divmod(countdown, 24*60*60)
            hours, countdown = divmod(countdown, 60*60)
            minutes, countdown = divmod(countdown, 60)
            comment += '\n\n^(Episode ' + str(nextEpisode) + ' airs in ' + str(days) + ' days, ' + str(hours) + ' hours, ' + str(minutes) + ' minutes)'

        #----- DESCRIPTION -----#
        if (isExpanded):
            comment += '\n\n' + cleanupDescription(desc)

        #----- END -----#
        receipt = '(A) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if apURL is not None:
            receipt += 'AP '
        if hb is not None:
            receipt += 'HB '
        if ani is not None:
            receipt += 'ANI '
        if anidbURL is not None:
            receipt += 'ADB '
        print(receipt)

        #We return the title/comment separately so we can track if multiples of the same comment have been requests (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment

        return dictToReturn
    except:
        #traceback.print_exc()
        return None
async def on_server_join(server):
    """Discord event hook: record a newly-joined server in the database."""
    joined_id = server.id
    DatabaseHandler.addServerToDatabase(joined_id)
    print("Added server {} to database".format(joined_id))
def __init__(self, parent, Rid, database, *args, **kwargs):
    """
    Input:  self - The object containing the frame being called
    Input:  parent - The parent window
    Input:  Rid - The Rid of the recipe to be edited
    Input:  database - handle used for getRecipe/getIngredients lookups
    Returns: None
    Purpose: Creates an opens the editing window with fields populated as approriate
    """
    tk.Toplevel.__init__(self, parent, *args, **kwargs)
    self.database = database
    # Recipe record loaded once; edited copies are written back by save().
    self.recipe = database.getRecipe(Rid)
    self.title("Edit - " + self.recipe["name"])

    ### Make the options menu (left column: add-ingredient form + buttons)
    self.fr_options = ttk.Frame(self, relief="raised", padding=1)
    self.fr_options.grid(row=0, column=0, sticky="NESW")
    self.rowconfigure(0, weight=1)
    self.lbl_addIngredient = ttk.Label(self.fr_options, text="Add Ingredients:", font=("Times", "12"))
    self.lbl_addIngredient.pack()
    # makeLabelEntry is defined elsewhere on this class; the empty dict /
    # None arguments appear to be placeholder record/key args — confirm
    # against its definition.
    self.ent_category = self.makeLabelEntry(self.fr_options, "Category: ", {}, None, frameside=tk.TOP, width=15)
    self.ent_ingName = self.makeLabelEntry(self.fr_options, "Name: ", {}, None, frameside=tk.TOP, width=15)
    self.btn_add = ttk.Button(self.fr_options, text="Add", command=self.add)
    self.btn_add.pack()
    self.btn_close = ttk.Button(self.fr_options, text="Close", command=self.destroy)
    self.btn_close.pack(side=tk.BOTTOM, anchor="s", pady=(5, 5))
    self.btn_save = ttk.Button(self.fr_options, text="Save", command=self.save)
    self.btn_save.pack(side=tk.BOTTOM, anchor="s", pady=(40, 0))
    ### Done making options menu

    # NOTE(review): these two assignments are dead — both variables are
    # reassigned below before first use. Candidates for removal.
    keys = [
        "name", "start", "end", "abv", "OG", "FG", "volume", "instructions"
    ]
    tags = [
        "", "Start date: ", "End date: ", "abv: ", "OG: ", "FG: ",
        "Batch size: ", "Instructions: "
    ]
    # Entry widgets keyed by recipe field name; populated by makeLabelEntrys.
    self.entrys = {}
    self.fr_recipe = ttk.Frame(self)
    self.fr_recipe.grid(row=0, column=1, sticky="EW")
    self.columnconfigure(1, weight=1)
    ttk.Label(self.fr_recipe,
              text=self.recipe["name"] + " V" + str(self.recipe["version"]),
              font=("times", "20", "bold")).pack()

    # Date fields.
    keys = ["start", "end"]
    tags = ["Start date: ", "End date: "]
    self.makeLabelEntrys(self.fr_recipe, self.recipe, keys, tags)
    # Numeric brewing stats.
    keys = ["abv", "OG", "FG", "volume"]
    tags = ["abv: ", "OG: ", "FG: ", "Volume: "]
    self.makeLabelEntrys(self.fr_recipe, self.recipe, keys, tags)

    # Ingredient list rendered by the class's makeIngredientList helper.
    self.ingredientList = database.getIngredients(Rid)
    self.makeIngredientList(self.ingredientList)

    # Free-form instructions text box.
    fr_instuctions = ttk.LabelFrame(self.fr_recipe, text="Instructions")
    fr_instuctions.pack(side=tk.TOP, expand="TRUE", fill="both")
    self.txt_instructions = tk.Text(fr_instuctions, wrap=tk.WORD)
    self.txt_instructions.insert(tk.END, self.recipe["instructions"])
    self.txt_instructions.pack(expand="true", fill="both")
def buildStatsEmbed(server=None, username=None, serverID="171004769069039616"):
    """Build a Discord embed describing request statistics.

    Branches on which argument is supplied:
      - username given: per-user stats (server is also required here, to
        resolve the member's nickname);
      - else server given: per-server stats;
      - else: overall stats for the default serverID.

    Returns the embed from buildEmbedObject, or None on any error.
    """
    try:
        userNick = ''
        statComment = ''
        receipt = '(S) Request successful: Stats'
        if username is not None:
            # NOTE(review): assumes server is non-None whenever username is
            # given — get_member would raise otherwise. Confirm callers.
            reqMember = server.get_member(username)
            if reqMember.nick:
                userNick = reqMember.nick
            else:
                userNick = reqMember.name
            userStats = DatabaseHandler.getUserStats(username)
            if userStats:
                statComment += 'Some stats on ' + userNick + ':\n\n'
                statComment += '- **' + str(
                    userStats['totalUserRequests']
                ) + '** requests made (' + str(
                    round(userStats['totalUserRequestsAsPercentage'],
                          3)) + '% of all requests and #' + str(
                              userStats['overallRequestRank']) + ' overall)\n'
                statComment += '- **' + str(
                    userStats['uniqueRequests']
                ) + '** unique anime/manga requested\n'
                statComment += '\n'
                statComment += 'Their most frequently requested anime/manga overall are:\n\n'
                for i, request in enumerate(userStats['topRequests']):
                    statComment += str(i + 1) + '. **' + str(
                        request[0]) + '** (' + str(request[1]) + ' - ' + str(
                            request[2]) + ' requests) \n'
            else:
                statComment += str(userNick) + ' hasn\'t used Roboragi yet.'
            receipt += ' - ' + userNick
        elif server:
            # NOTE(review): the server object itself is passed here, whereas
            # the sibling buildStatsComment passes a lowercased string —
            # verify what getSubredditStats expects.
            serverStats = DatabaseHandler.getSubredditStats(server)
            if serverStats:
                statComment += '**' + server.name + ' Stats**\n\n'
                statComment += 'On ' + server.name
                statComment += ' I have fulfilled a total of ' + str(
                    serverStats['total']) + ' requests, '
                statComment += 'representing ' + str(
                    round(serverStats['totalAsPercentage'],
                          2)) + '% of all requests. '
                statComment += 'A total of ' + str(
                    serverStats['uniqueNames']
                ) + ' unique anime/manga have been requested here, '
                statComment += 'with a mean value of ' + str(
                    round(serverStats['meanValuePerRequest'],
                          3)) + ' requests/show'
                statComment += ' and a standard deviation of ' + str(
                    round(serverStats['standardDeviation'], 3)) + '.'
                statComment += '\n\n'
                statComment += 'The most frequently requested anime/manga on this server are:\n\n'
                for i, request in enumerate(serverStats['topRequests']):
                    statComment += str(i + 1) + '. **' + str(
                        request[0]) + '** (' + str(request[1]) + ' - ' + str(
                            request[2]) + ' requests)\n'
            else:
                statComment += 'There have been no requests on ' + str(
                    server) + ' yet.'
            receipt += ' - ' + server.name
        else:
            basicStats = DatabaseHandler.getBasicStats(serverID)

            #The overall stats section
            statComment += '**Overall Stats**\n\n'
            statComment += 'I\'ve searched through ' + str(
                basicStats['totalComments'])
            statComment += ' unique comments and fulfilled a total of ' + str(
                basicStats['total'])
            statComment += ' requests across ' + str(
                basicStats['uniqueSubreddits']) + ' unique server(s). '
            statComment += 'A total of ' + str(basicStats['uniqueNames'])
            statComment += ' unique anime/manga have been requested, with a mean value of ' + str(
                round(basicStats['meanValuePerRequest'], 3))
            statComment += ' requests/show and a standard deviation of ' + str(
                round(basicStats['standardDeviation'], 3)) + '.'
            statComment += '\n\n'
            statComment += 'The most frequently requested anime/manga overall are:\n\n'
            for i, request in enumerate(basicStats['topRequests']):
                statComment += str(i + 1) + '. **' + str(
                    request[0]) + '** (' + str(request[1]) + ' - ' + str(
                        request[2]) + ' requests)\n'
            statComment += '\n'
            receipt += ' - Basic'
        # encode() so non-ASCII nicknames don't crash consoles with a
        # narrow default encoding.
        print(receipt.encode('utf8'))
        localEmbed = buildEmbedObject('Stats', '', statComment, '', False, '')
        return localEmbed
    except:
        traceback.print_exc()
        return None
""" This script calls the database handler to make a new copy of the example recipe """ import DatabaseHandler as database if __name__ == '__main__': db = database.Database(r".\Brewing.db") ingredients = [{"name":"DAP", "category":"yeast nutrient", "amount":"4", "unit":"g", "additionTime":"", "state":"add"},\ {"name":"Yeast Hulls", "category":"yeast nutrient", "amount":"4", "unit":"g", "additionTime":"", "state":"add"},\ {"name":"EC-118", "category":"yeast", "amount":"1", "unit":"pack", "additionTime":"", "state":"add"},\ {"name":"Organic cane sugar", "category":"sugar", "amount":"500","additionTime":"", "unit":"g","state":"add"},\ {"name":"Lime", "category":"fruit","amount":"1","unit":"g","additionTime":"","state":"add"}] recipe = { "name": "Ginger Beer", "version": -1, "start": "2021-04-28", "end": "2021-05-21", "abv": "5.0", "volume": "1", "instructions": "Bring the water to a boil, wrap the grains in cheesecloth and place them in the boiling water to steep for 15 minutes. After steeping add lime juice and allow the mixture to cool to ~20C. Transfer to your fermentation vessel of choice and pitch the yeast" } db.setorUpdateRecipe(recipe, ingredients)
def save(self):
    """
    Input:  self - The object containing the frame being called
    Returns: Nothing
    Purpose: Reads the data from all of the entry feilds and updates the
             respective lsits before calling setorUpdateRecipe() to insert
             the new information into the database.
    """
    # Pull the simple scalar fields straight from their entry widgets.
    for key in ["start", "end", "abv", "OG", "FG", "volume"]:
        self.recipe[key] = self.entrys[key].get()
    self.recipe["instructions"] = self.txt_instructions.get("1.0", tk.END)

    # Fix: the original popped from self.ingredientList (and the parallel
    # frame list) while enumerating the live list, which skips the element
    # immediately after every removal. Build filtered copies instead, keeping
    # the two lists index-aligned, then swap them in.
    keptIngredients = []
    keptFrames = []
    for ingredient, frame in zip(self.ingredientList, self.IngredientFrameList):
        if not ingredient:
            # Empty placeholder — drop it and its frame.
            continue
        if ingredient["state"] == "add" or ingredient["state"] == "current":
            # Live rows: refresh editable fields from their entry widgets.
            for key in ["unit", "amount"]:
                ingredient[key] = frame[key].get()
        keptIngredients.append(ingredient)
        keptFrames.append(frame)
    self.ingredientList = keptIngredients
    self.IngredientFrameList = keptFrames

    self.database.setorUpdateRecipe(self.recipe, self.ingredientList)


if __name__ == "__main__":
    root = tk.Tk()
    root.title("ParentWindow")
    test = editWindow(root, 1, database.Database(r".\Brewing.db"))
    root.mainloop()
# Smoke-test script: checks that the REST API and the database are reachable.
import DatabaseHandler
import restApiHandler

print("Welcome to our test script for API \n")

#Attempting connection to API
# NOTE(review): "conenction" is a typo, but it is the function's actual name
# in restApiHandler — renaming must happen there and at every call site.
if(restApiHandler.check_conenction_to_api()):
    print("Connection to API working ! \n")
else:
    print("API down \n")

#checking our DB is app
DatabaseHandler.check_database_connection()

#print(restApiHandler.get_pet_byId(2))
def buildLightNovelComment(isExpanded, mal, ani, nu, lndb, kit):
    """Build a Markdown comment for a light-novel request.

    Merges Anilist (ani), Kitsu (kit) and MAL (mal) dicts — in that priority
    order for title/description/etc. — plus pre-built NovelUpdates (nu) and
    LNDB (lndb) URLs.

    Returns {'title': ..., 'comment': ...}, or None on any failure.
    """
    try:
        comment = ''

        title = None
        jTitle = None
        cType = None

        malURL = None
        aniURL = None
        nuURL = nu
        lndbURL = lndb
        kitURL = None

        status = None
        chapters = None
        volumes = None
        genres = []

        desc = None

        if ani:
            aniURL = 'http://anilist.co/manga/' + str(ani['id'])
            title = ani['title_romaji'] if 'title_romaji' in ani else ani['title_english']
            desc = ani['description'] if 'description' in ani else None
            status = ani['publishing_status'].title() if 'publishing_status' in ani else None
            cType = ani['type'] if 'type' in ani else None
            jTitle = ani['title_japanese'] if 'title_japanese' in ani else None
            genres = ani['genres'] if 'genres' in ani else None
            chapters = ani['total_chapters'] if 'total_chapters' in ani else None
            if chapters == 0:
                chapters = None
            volumes = ani['total_volumes'] if 'total_volumes' in ani else None
            if volumes == 0:
                volumes = None

        if kit:
            kitURL = kit['url']
            if not title:
                title = kit['title_romaji'] if 'title_romaji' in kit else kit['title_english']
            if not desc:
                desc = kit['description'] if 'description' in kit else None
            if not cType:
                cType = kit['type'].title() if 'type' in kit else None
            if not chapters:
                chapters = kit['chapter_count'] if 'chapter_count' in kit else None
                if chapters == 0:
                    chapters = None
            if not volumes:
                volumes = kit['volume_count'] if 'volume_count' in kit else None
                if volumes == 0:
                    volumes = None

        if mal:
            malURL = 'http://myanimelist.net/manga/' + str(mal['id'])
            if not title:
                title = mal['title'] if 'title' in mal else mal['english']
            if not desc:
                desc = mal['synopsis'] if 'synopsis' in mal else None
            if not status:
                status = mal['status'] if 'status' in mal else None
            if not cType:
                cType = mal['type'] if 'type' in mal else None
            if not chapters:
                try:
                    if (int(mal['chapters']) == 0):
                        chapters = 'Unknown'
                    else:
                        chapters = mal['chapters']
                except:
                    chapters = None
            if not volumes:
                try:
                    if (int(mal['volumes']) == 0):
                        volumes = 'Unknown'
                    else:
                        volumes = mal['volumes']
                except:
                    volumes = 'Unknown'

        stats = DatabaseHandler.getRequestStats(title,'LN')

        #---------- BUILDING THE COMMENT ----------#

        #----- TITLE -----#
        comment += '**' + title.strip() + '** - ('

        #----- LINKS -----#
        urlComments = []
        # Fix: mal_english could be unbound (NameError -> whole function
        # returned None) when mal was set but mal['english'] raised.
        mal_english = None
        try:
            mal_english = mal['english']
        except:
            pass
        if malURL and mal_english:
            urlComments.append('[MAL](' + sanitise_url_for_markdown(malURL) + ' "' + mal_english + '")')
        elif malURL:
            urlComments.append('[MAL](' + sanitise_url_for_markdown(malURL) + ')')
        if aniURL is not None:
            urlComments.append('[AL](' + sanitise_url_for_markdown(aniURL) + ')')
        if kitURL is not None:
            urlComments.append('[KIT](' + sanitise_url_for_markdown(kitURL) + ')')
        if nuURL is not None:
            urlComments.append('[NU](' + sanitise_url_for_markdown(nuURL) + ')')
        if lndbURL is not None:
            urlComments.append('[LNDB](' + sanitise_url_for_markdown(lndbURL) + ')')

        for i, link in enumerate(urlComments):
            # Fix: was `i is not 0` (identity test on an int literal).
            if i != 0:
                comment += ', '
            comment += link

        comment += ')'

        #----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += '^^' + word

        #----- INFO LINE -----#
        # Fixes in this section: `tr(volumes)` was a NameError typo for
        # `str(volumes)`, and all `str(x) is not 'Unknown'` identity tests
        # are now proper `!=` comparisons.
        if (isExpanded):
            comment += '\n\n^('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if (cType != 'Light Novel'):
                if volumes and str(volumes) != 'Unknown':
                    comment += ' | **Volumes:** ' + str(volumes)
                if chapters and str(chapters) != 'Unknown':
                    comment += ' | **Chapters:** ' + str(chapters)
            else:
                if volumes and str(volumes) != 'Unknown':
                    comment += ' | **Volumes:** ' + str(volumes)
            if genres:
                comment += ' | **Genres:** '
        else:
            comment += '\n\n^('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += cType + ' | '
            comment += 'Status: ' + status
            if (cType != 'Light Novel'):
                if volumes and str(volumes) != 'Unknown':
                    comment += ' | Volumes: ' + str(volumes)
                if chapters and str(chapters) != 'Unknown':
                    comment += ' | Chapters: ' + str(chapters)
            else:
                if volumes and str(volumes) != 'Unknown':
                    comment += ' | Volumes: ' + str(volumes)
            if genres:
                comment += ' | Genres: '

        if genres:
            for i, genre in enumerate(genres):
                if i != 0:
                    comment += ', '
                comment += genre

        if (isExpanded) and (stats is not None):
            comment += ' \n**Stats:** ' + str(stats['total']) + ' requests across ' + str(stats['uniqueSubreddits']) + ' subreddit(s)^) ^- ^' + str(round(stats['totalAsPercentage'],3)) + '% ^of ^all ^requests'
        else:
            comment += ')'

        #----- DESCRIPTION -----#
        if (isExpanded):
            comment += '\n\n' + cleanupDescription(desc)

        #----- END -----#
        receipt = '(LN) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if ani is not None:
            receipt += 'AL '
        if kit is not None:
            receipt += 'KIT '
        if nuURL is not None:
            receipt += 'MU '
        if lndbURL is not None:
            receipt += 'LNDB '
        print(receipt)

        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment

        return dictToReturn
    except:
        traceback.print_exc()
        return None
def buildAnimeComment(isExpanded, mal, ani, ap, anidb, kit):
    """Build a Markdown comment for an anime request.

    Merges Anilist (ani), Kitsu (kit) and MAL (mal) dicts — in that priority
    order — plus pre-built Anime-Planet (ap) and AniDB (anidb) URLs.

    Returns {'title': ..., 'comment': ...} so callers can track duplicate
    requests (e.g. {Nisekoi}{Nisekoi}{Nisekoi}), or None on any failure.
    """
    try:
        comment = ''

        title = None
        jTitle = None
        cType = None

        malURL = None
        aniURL = None
        apURL = ap
        anidbURL = anidb
        # Fix: kitURL was never initialised; it is now None like every other
        # URL, so the link section can test it safely.
        kitURL = None

        status = None
        episodes = None
        genres = []

        countdown = None
        nextEpisode = None

        desc = None

        if ani:
            aniURL = 'http://anilist.co/anime/' + str(ani['id'])
            title = ani['title_romaji'] if 'title_romaji' in ani else ani['title_english']
            desc = ani['description'] if 'description' in ani else None
            status = ani['airing_status'].title() if 'airing_status' in ani else None
            cType = ani['type'] if 'type' in ani else None
            jTitle = ani['title_japanese'] if 'title_japanese' in ani else None
            genres = ani['genres'] if 'genres' in ani else None
            episodes = ani['total_episodes'] if 'total_episodes' in ani else None
            if episodes == 0:
                episodes = None
            if ani['airing']:
                countdown = ani['airing']['countdown']
                nextEpisode = ani['airing']['next_episode']

        if kit:
            kitURL = kit['url']
            if not title:
                title = kit['title_romaji'] if 'title_romaji' in kit else kit['title_english']
            if not desc:
                desc = kit['description'] if 'description' in kit else None
            if not cType:
                cType = kit['type'].title() if 'type' in kit else None
            if not episodes:
                episodes = kit['episode_count'] if 'episode_count' in kit else None
                if episodes == 0:
                    episodes = None

        if mal:
            malURL = 'http://myanimelist.net/anime/' + str(mal['id'])
            if not title:
                title = mal['title'] if 'title' in mal else mal['english']
            if not desc:
                desc = mal['synopsis'] if 'synopsis' in mal else None
            if not status:
                status = mal['status'] if 'status' in mal else None
            if not cType:
                cType = mal['type'] if 'type' in mal else None

        stats = DatabaseHandler.getRequestStats(title, 'Anime')

        #---------- BUILDING THE COMMENT ----------#

        #----- TITLE -----#
        comment += '**' + title.strip() + '** - ('

        #----- LINKS -----#
        urlComments = []
        # Fix: mal_english could be unbound (NameError -> function returned
        # None) when mal was set but mal['english'] raised.
        mal_english = None
        try:
            mal_english = mal['english']
        except:
            pass
        if malURL and mal_english:
            urlComments.append('[MAL](' + sanitise_url_for_markdown(malURL) + ' "' + mal_english + '")')
        elif malURL:
            urlComments.append('[MAL](' + sanitise_url_for_markdown(malURL) + ')')
        if apURL is not None:
            urlComments.append('[A-P](' + sanitise_url_for_markdown(apURL) + ')')
        if ani is not None:
            urlComments.append('[AL](' + sanitise_url_for_markdown(aniURL) + ')')
        # Fix: was `if kit is not None`, which dereferenced kitURL even when
        # it had never been assigned; testing the URL itself is consistent
        # with anidbURL handling above.
        if kitURL is not None:
            urlComments.append('[KIT](' + sanitise_url_for_markdown(kitURL) + ')')
        if anidbURL is not None:
            urlComments.append('[ADB](' + sanitise_url_for_markdown(anidbURL) + ')')

        for i, link in enumerate(urlComments):
            # Fix: was `i is not 0` (identity test on an int literal).
            if i != 0:
                comment += ', '
            comment += link

        comment += ')'

        #----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += '^^' + word

        #----- INFO LINE -----#
        if (isExpanded):
            comment += '\n\n^('
            if cType:
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if cType != 'Movie' and episodes:
                comment += ' | **Episodes:** ' + str(episodes)
            comment += ' | **Genres:** '
        else:
            comment += '\n\n^('
            if cType:
                comment += cType + ' | '
            comment += 'Status: ' + status
            if cType != 'Movie' and episodes:
                comment += ' | Episodes: ' + str(episodes)
            comment += ' | Genres: '

        if not (genres == []):
            for i, genre in enumerate(genres):
                if i != 0:
                    comment += ', '
                comment += genre
        else:
            comment += 'None'

        if (isExpanded) and (stats is not None):
            comment += ' \n**Stats:** ' + str(stats['total']) + ' requests across ' + str(stats['uniqueSubreddits']) + ' subreddit(s)^) ^- ^' + str(round(stats['totalAsPercentage'],3)) + '% ^of ^all ^requests'
        else:
            comment += ')'

        #----- EPISODE COUNTDOWN -----#
        if (countdown is not None) and (nextEpisode is not None):
            #countdown is given to us in seconds
            days, countdown = divmod(countdown, 24*60*60)
            hours, countdown = divmod(countdown, 60*60)
            minutes, countdown = divmod(countdown, 60)
            comment += '\n\n^(Episode ' + str(nextEpisode) + ' airs in ' + str(days) + ' days, ' + str(hours) + ' hours, ' + str(minutes) + ' minutes)'

        #----- DESCRIPTION -----#
        if (isExpanded):
            comment += '\n\n' + cleanupDescription(desc)

        #----- END -----#
        receipt = '(A) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if apURL is not None:
            receipt += 'AP '
        if ani is not None:
            receipt += 'AL '
        if kit is not None:
            receipt += 'KIT '
        if anidbURL is not None:
            receipt += 'ADB '
        print(receipt)

        #We return the title/comment separately so we can track if multiples of the same comment have been requests (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment

        return dictToReturn
    except:
        traceback.print_exc()
        return None
def CalculateCompletionForDatabaseTreeNode(node):
    """Recompute and persist completion statistics for one database tree node.

    For every translation stage i (0..TranslationStagesCount) it counts the
    Text rows at status >= i — over the whole table, or summed over the
    node's Subsections when present — and upserts the count into StatusData.
    A second pass does the same for rows with a non-empty comment, stored
    under the sentinel type == -2.
    """
    Globals.Cache.databaseAccessRLock.acquire()
    # Fix: the lock was only released on the success path; any exception
    # would leave it held forever. try/finally guarantees release.
    try:
        CompletionConnection, CompletionCursor = DatabaseHandler.GetCompletionPercentageConnectionAndCursor(
        )
        DatabaseConnection = DatabaseHandler.OpenEntryDatabase(node.Name)
        DatabaseCursor = DatabaseConnection.cursor()
        databaseName = GetCompletionTableDatabaseNameOfTreeNode(node)

        for i in range(0, Globals.configData.TranslationStagesCount + 1):
            # Count rows that have reached at least stage i.
            # Fix: queries were built with str.format; parameterized now,
            # consistent with the StatusData queries below.
            if not node.Subsections:
                DatabaseCursor.execute(
                    'SELECT Count(1) FROM Text WHERE status >= ?', [i])
                count = DatabaseCursor.fetchall()[0][0]
            else:
                count = 0
                for sub in node.Subsections:
                    DatabaseCursor.execute(
                        'SELECT Count(1) FROM Text WHERE status >= ? AND ID >= ? AND ID <= ?',
                        [i, sub.Start, sub.End])
                    count += int(DatabaseCursor.fetchall()[0][0])

            # Upsert the stage count.
            CompletionCursor.execute(
                "SELECT Count(1) FROM StatusData WHERE database = ? AND type = ?",
                [databaseName, i])
            exists = CompletionCursor.fetchall()[0][0]
            if exists > 0:
                CompletionCursor.execute(
                    "UPDATE StatusData SET amount = ? WHERE database = ? AND type = ?",
                    [count, databaseName, i])
            else:
                CompletionCursor.execute(
                    "INSERT INTO StatusData (database, type, amount) VALUES (?, ?, ?)",
                    [databaseName, i, count])

        # Count rows carrying a reviewer comment.
        if not node.Subsections:
            DatabaseCursor.execute("SELECT Count(1) FROM Text WHERE comment != ''")
            count = DatabaseCursor.fetchall()[0][0]
        else:
            count = 0
            for sub in node.Subsections:
                DatabaseCursor.execute(
                    "SELECT Count(1) FROM Text WHERE comment != '' AND ID >= ? AND ID <= ?",
                    [sub.Start, sub.End])
                count += int(DatabaseCursor.fetchall()[0][0])

        # type == -2 for comment count
        CompletionCursor.execute(
            "SELECT Count(1) FROM StatusData WHERE database = ? AND type = -2",
            [databaseName])
        exists = CompletionCursor.fetchall()[0][0]
        if exists > 0:
            CompletionCursor.execute(
                "UPDATE StatusData SET amount = ? WHERE database = ? AND type = -2",
                [count, databaseName])
        else:
            CompletionCursor.execute(
                "INSERT INTO StatusData (database, type, amount) VALUES (?, -2, ?)",
                [databaseName, count])

        CompletionConnection.commit()
    finally:
        Globals.Cache.databaseAccessRLock.release()
def buildLightNovelComment(isExpanded, mal, ani, nu, lndb):
    """Build a plain-link comment for a light-novel request (legacy variant).

    Merges MAL (mal) and Anilist (ani) dicts — Anilist values override MAL
    where present — plus pre-built NovelUpdates (nu) and LNDB (lndb) URLs.

    Returns {'title': ..., 'comment': ...}, or None on any failure.
    """
    try:
        comment = ''

        title = None
        jTitle = None
        cType = None

        malURL = None
        aniURL = None
        nuURL = nu
        lndbURL = lndb

        status = None
        chapters = None
        volumes = None
        genres = []

        desc = None

        if not (mal is None):
            title = mal['title']
            malURL = 'http://myanimelist.net/manga/' + str(mal['id'])
            desc = mal['synopsis']
            status = mal['status']
            cType = mal['type']
            try:
                if (int(mal['chapters']) == 0):
                    chapters = 'Unknown'
                else:
                    chapters = mal['chapters']
            except:
                chapters = 'Unknown'
            try:
                volumes = mal['volumes']
            except:
                volumes = 'Unknown'

        if ani is not None:
            if title is None:
                title = ani['title_english']
            aniURL = 'http://anilist.co/manga/' + str(ani['id'])
            desc = ani['description']
            status = ani['publishing_status'].title()
            cType = ani['type']
            try:
                if ani['title_japanese'] is not None:
                    jTitle = ani['title_japanese']
                if ani['total_chapters'] is not None:
                    if ani['total_chapters'] == 0:
                        chapters = 'Unknown'
                    else:
                        chapters = ani['total_chapters']
                if ani['total_volumes'] is not None:
                    volumes = ani['total_volumes']
                else:
                    volumes = 'Unknown'
                if ani['genres'] is not None:
                    genres = ani['genres']
            except Exception as e:
                print(e)

        stats = DatabaseHandler.getRequestStats(title, 'LN')

        #---------- BUILDING THE COMMENT ----------#

        #----- TITLE -----#
        comment += '**' + title.strip() + '** -\n\n'

        #----- LINKS -----#
        urlComments = []
        if malURL is not None:
            urlComments.append(malURL)
        if aniURL is not None:
            urlComments.append(aniURL)
        if nuURL is not None:
            urlComments.append(nuURL)
        if lndbURL is not None:
            urlComments.append(lndbURL)

        for i, link in enumerate(urlComments):
            # Fix: was `i is not 0` (identity test on an int literal).
            if i != 0:
                comment += '\n '
            comment += link

        comment += '\n\n'

        #----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += word

        #----- INFO LINE -----#
        # Fix throughout this section: `str(chapters) is not 'Unknown'` was
        # an identity comparison against a string literal — it only worked
        # by virtue of CPython string interning. Replaced with `!=`.
        if (isExpanded):
            comment += '\n\n('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if (cType != 'Light Novel'):
                if str(chapters) != 'Unknown':
                    comment += ' | **Chapters:** ' + str(chapters)
            else:
                comment += ' | **Volumes:** ' + str(volumes)
            if genres:
                comment += ' | **Genres:** '
        else:
            comment += '\n\n('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += cType + ' | '
            comment += 'Status: ' + status
            if (cType != 'Light Novel'):
                if str(chapters) != 'Unknown':
                    comment += ' | Chapters: ' + str(chapters)
            else:
                comment += ' | Volumes: ' + str(volumes)
            if genres:
                comment += ' | Genres: '

        if genres:
            for i, genre in enumerate(genres):
                if i != 0:
                    comment += ', '
                comment += genre

        if (isExpanded) and (stats is not None):
            comment += ' \n**Stats:** ' + str(
                stats['total']) + ' requests across ' + str(
                    stats['uniqueSubreddits']) + ' subreddit(s)^) ^- ^' + str(
                        round(stats['totalAsPercentage'],
                              3)) + '% ^of ^all ^requests'
        else:
            comment += ')'

        #----- DESCRIPTION -----#
        if (isExpanded):
            comment += '\n\n' + cleanupDescription(desc)

        #----- END -----#
        receipt = '(LN) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if ani is not None:
            receipt += 'ANI '
        if nuURL is not None:
            receipt += 'MU '
        if lndbURL is not None:
            receipt += 'LNDB '
        print(receipt.encode('utf8'))

        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment

        return dictToReturn
    except:
        traceback.print_exc()
        return None
#
# Main program
#
# Enumerates report files (from the configured directory, falling back to the
# current working directory), parses each, and inserts it into the database.
import ReportFinder
import ReportReader
import DatabaseHandler
import os

from Settings import PathSettings

# Fix: the original bound this to `dir`, shadowing the builtin.
report_dir = PathSettings['ReportDirectory']
files = ReportFinder.enumReports(report_dir if report_dir else os.getcwd())

DatabaseHandler.connectToDatabase(PathSettings['Database'])

for report_file in files:
    report = ReportReader.readReport(report_file)
    DatabaseHandler.insertToDatabase(report)
def buildStatsComment(server=None, username=None, serverID="171004769069039616"):
    """Build a Markdown stats summary string.

    Branches on which argument is supplied: per-user stats when username is
    given, else per-server stats when server is given, else overall stats
    for the default serverID. Returns the string, or None on any error.
    """
    try:
        statComment = ''
        receipt = '(S) Request successful: Stats'
        if username:
            userStats = DatabaseHandler.getUserStats(username)
            if userStats:
                statComment += 'Some stats on ' + username + ':\n\n'
                statComment += '- **' + str(
                    userStats['totalUserComments']
                ) + '** total comments searched (' + str(
                    round(userStats['totalUserCommentsAsPercentage'],
                          3)) + '% of all comments)\n'
                statComment += '- **' + str(
                    userStats['totalUserRequests']
                ) + '** requests made (' + str(
                    round(userStats['totalUserRequestsAsPercentage'],
                          3)) + '% of all requests and #' + str(
                              userStats['overallRequestRank']) + ' overall)\n'
                statComment += '- **' + str(
                    userStats['uniqueRequests']
                ) + '** unique anime/manga requested\n'
                statComment += '- **/r/' + str(
                    userStats['favouriteSubreddit']
                ) + '** is their favourite server with ' + str(
                    userStats['favouriteSubredditCount']
                ) + ' requests (' + str(
                    round(userStats['favouriteSubredditCountAsPercentage'],
                          3)) + '% of the server\'s requests)\n'
                statComment += '\n'
                statComment += 'Their most frequently requested anime/manga overall are:\n\n'
                for i, request in enumerate(userStats['topRequests']):
                    statComment += str(i + 1) + '. **' + str(
                        request[0]) + '** (' + str(request[1]) + ' - ' + str(
                            request[2]) + ' requests) \n'
            else:
                statComment += '/u/' + str(
                    username) + ' hasn\'t used Roboragi yet.'
            receipt += ' - /u/' + username
        elif server:
            # NOTE: `server` is rebound from the server object to its string
            # name here; `serverID` is captured first for potential later use.
            serverID = server.id
            server = str(server)
            serverStats = DatabaseHandler.getSubredditStats(server.lower())
            if serverStats:
                statComment += '**' + server + ' Stats**\n\n'
                statComment += 'I\'ve searched through ' + str(
                    serverStats['totalComments'])
                statComment += ' unique comments on ' + server
                statComment += ' and fulfilled a total of ' + str(
                    serverStats['total']) + ' requests, '
                statComment += 'representing ' + str(
                    round(serverStats['totalAsPercentage'],
                          2)) + '% of all requests. '
                statComment += 'A total of ' + str(
                    serverStats['uniqueNames']
                ) + ' unique anime/manga have been requested here, '
                statComment += 'with a mean value of ' + str(
                    round(serverStats['meanValuePerRequest'],
                          3)) + ' requests/show'
                statComment += ' and a standard deviation of ' + str(
                    round(serverStats['standardDeviation'], 3)) + '.'
                statComment += '\n\n'
                statComment += 'The most frequently requested anime/manga on this server are:\n\n'
                for i, request in enumerate(serverStats['topRequests']):
                    statComment += str(i + 1) + '. **' + str(
                        request[0]) + '** (' + str(request[1]) + ' - ' + str(
                            request[2]) + ' requests)\n'
                statComment += '\n'
                statComment += 'The most frequent requesters on this server are:\n\n'
                for i, requester in enumerate(serverStats['topRequesters']):
                    statComment += str(i + 1) + '. /u/' + str(
                        requester[0]) + ' (' + str(
                            requester[1]) + ' requests)\n'
            else:
                statComment += 'There have been no requests on ' + str(
                    server) + ' yet.'
            receipt += ' - ' + server
        else:
            basicStats = DatabaseHandler.getBasicStats(serverID)

            #The overall stats section
            statComment += '**Overall Stats**\n\n'
            statComment += 'I\'ve searched through ' + str(
                basicStats['totalComments'])
            statComment += ' unique comments and fulfilled a total of ' + str(
                basicStats['total'])
            statComment += ' requests across ' + str(
                basicStats['uniqueSubreddits']) + ' unique server(s). '
            statComment += 'A total of ' + str(basicStats['uniqueNames'])
            statComment += ' unique anime/manga have been requested, with a mean value of ' + str(
                round(basicStats['meanValuePerRequest'], 3))
            statComment += ' requests/show and a standard deviation of ' + str(
                round(basicStats['standardDeviation'], 3)) + '.'
            statComment += '\n\n'
            statComment += 'The most frequently requested anime/manga overall are:\n\n'
            for i, request in enumerate(basicStats['topRequests']):
                statComment += str(i + 1) + '. **' + str(
                    request[0]) + '** (' + str(request[1]) + ' - ' + str(
                        request[2]) + ' requests)\n'
            statComment += '\n'
            statComment += 'The most frequent requesters overall are: \n'
            for i, requester in enumerate(basicStats['topRequesters']):
                statComment += str(i + 1) + '. ' + str(
                    Discord.getUsernameFromID(requester[0], )) + ' (' + str(
                        requester[1]) + ' requests) \n'
            statComment += '\n'
            receipt += ' - Basic'
        # encode() so non-ASCII names don't crash consoles with a narrow
        # default encoding.
        print(receipt.encode('utf8'))
        return statComment
    except:
        traceback.print_exc()
        return None
def run(self):
    """Main bot loop: stream subreddit comments and reply to card requests.

    Runs until the comment stream raises; periodic maintenance (TCG card
    list refresh, submission processing) is piggybacked onto the stream
    since there is no separate timer thread.
    """
    try:
        print("Starting stream")
        commentStream = self.reddit.subreddit(
            self.subredditList).stream.comments()
        for comment in commentStream:
            # Periodic maintenance, checked on every streamed comment.
            # Config intervals are in hours (hence * 60 * 60).
            if ((time.time() - self.updateTime) > Config.tcgUpdateInterval * 60 * 60):
                DatabaseHandler.updateTCGCardlist()
                self.updateTime = time.time()
            if ((time.time() - self.submissionsLastProcessed) > Config.submissionProcessingInterval * 60 * 60):
                self.submissionProcessor.processSubmissions(100)
                self.submissionsLastProcessed = time.time()
            #print("Found comment")
            #If we've already seen this comment, ignore it
            if DatabaseHandler.commentExists(comment.id):
                continue
            #If the post has been deleted, getting the author will return an error
            try:
                author = comment.author.name
            except Exception as e:
                continue
            #If this is one of our own comments, ignore it
            if (author == 'YGOLinkBot'):
                continue
            reply = self.requestHandler.buildResponse(comment.body)
            try:
                if reply:
                    # Record every card linked in the reply for request stats.
                    cards = re.findall(r'\[\*\*(.+?)\*\*\]\(', reply)
                    for card in cards:
                        DatabaseHandler.addRequest(card, author, comment.subreddit)
                    # Special-case threads that get upper/lower-cased replies.
                    if ("VENT THREAD" in comment.link_title):
                        reply = self.submissionProcessor.convertCase(
                            True, reply)
                    elif ("happiness thread" in comment.link_title):
                        reply = self.submissionProcessor.convertCase(
                            False, reply)
                    comment.reply(reply)
                    DatabaseHandler.addComment(comment.id, author, comment.subreddit, True)
                    print("Comment made.\n")
                else:
                    if ('{' in comment.body and '}' in comment.body):
                        print('')
                    # Remember the comment so it is not reprocessed later.
                    DatabaseHandler.addComment(comment.id, author, comment.subreddit, False)
            except Exception as e:
                # Replying failed (rate limit, ban, API hiccup); mail the
                # error and keep streaming rather than dying.
                SendErrorMail(e, traceback.format_exc())
                print("Reddit probably broke when replying:" + str(e) + '\n')
    except Exception as e:
        SendErrorMail(e, traceback.format_exc())
        print(e)
        pass
def buildAnimeEmbed(isExpanded, mal, ani, ap, anidb):
    """Build a Discord embed payload for an anime request.

    :param isExpanded: True for the long-form reply (Japanese title, stats,
        description); False for the compact form.
    :param mal: MAL record dict or None.
    :param ani: Anilist record dict or None.
    :param ap: Anime-Planet URL or None.
    :param anidb: AniDB URL or None.
    :return: dict with 'title' and 'embed' keys, or None on any failure.
    """
    try:
        comment = ''
        descComment = ''
        title = None
        jTitle = None
        cType = None
        malimage = ''
        malURL = None
        aniURL = None
        apURL = ap
        anidbURL = anidb
        youtubeTrailer = None
        status = None
        episodes = None
        genres = []
        countdown = None
        nextEpisode = None
        desc = None

        # MAL data first; Anilist data (below) overrides most of it.
        if mal:
            desc = mal['synopsis']
            if mal['type']:
                cType = mal['type']
            malURL = 'http://myanimelist.net/anime/' + str(mal['id'])
            if mal['title']:
                title = mal['title']
            if mal['english']:
                title = mal['english']
            if mal['image']:
                malimage = mal['image']
            if mal['status']:
                status = mal['status']

        if ani is not None:
            title = ani['title_romaji']
            aniURL = 'http://anilist.co/anime/' + str(ani['id'])
            try:
                cType = ani['type']
                desc = ani['description']
            except Exception:
                pass
            if status is None:
                try:
                    status = ani['airing_status'].title()
                except Exception as e:
                    print(e)
            # These fields only exist on the "full" Anilist record; a
            # KeyError here means we only have the basic record.
            try:
                if ani['title_japanese'] is not None:
                    jTitle = ani['title_japanese']
                if ani['youtube_id'] is not None:
                    youtubeTrailer = ani['youtube_id']
                if ani['total_episodes'] is not None:
                    if ani['total_episodes'] == 0:
                        episodes = 'Unknown'
                    else:
                        episodes = ani['total_episodes']
                if ani['genres'] is not None:
                    genres = ani['genres']
                if ani['airing'] is not None:
                    countdown = ani['airing']['countdown']
                    nextEpisode = ani['airing']['next_episode']
            except Exception:
                print('No full details for Anilist')

        # Prefer Anilist's romaji title when looking up request stats.
        stats = DatabaseHandler.getRequestStats(title, 'Anime')
        if ani is not None:
            stats = DatabaseHandler.getRequestStats(ani['title_romaji'], 'Anime')

        #---------- BUILDING THE COMMENT ----------#
        comment = ''

        #----- LINKS -----#
        urlComments = []
        if malURL is not None:
            urlComments.append("[MAL]({})".format(
                sanitise_url_for_markdown(malURL)))
        if apURL is not None:
            urlComments.append("[AP]({})".format(
                sanitise_url_for_markdown(apURL)))
        if ani is not None:
            urlComments.append("[AL]({})".format(
                sanitise_url_for_markdown(aniURL)))
        if anidbURL is not None:
            urlComments.append("[AniDB]({})".format(
                sanitise_url_for_markdown(anidbURL)))
        allLinks = ', '.join(urlComments)

        #----- JAPANESE TITLE -----#
        if isExpanded:
            if jTitle is not None:
                comment += '\n\n'
                # Normalise internal whitespace to single spaces.
                comment += ' '.join(jTitle.split())

        #----- INFO LINE -----#
        # NOTE(review): `status` may still be None here for records with no
        # status field, which makes the concatenation below raise and the
        # whole build return None — matches prior behavior, confirm intended.
        if isExpanded:
            comment += '\n\n('
            if cType:
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if cType != 'Movie':
                comment += ' | **Episodes:** ' + str(episodes)
            comment += ' | **Genres:** '
        else:
            comment += '\n\n('
            if cType:
                comment += cType + ' | '
            comment += 'Status: ' + status
            if cType != 'Movie':
                comment += ' | Episodes: ' + str(episodes)
            comment += ' | Genres: '

        if genres:
            comment += ', '.join(genres)
        else:
            comment += 'None'

        if isExpanded and (stats is not None):
            # BUG FIX: removed a stray ')' after "server(s" that produced
            # unbalanced parentheses in the rendered stats line.
            comment += ') \n\n**Stats:** ' + str(
                stats['total']) + ' requests across ' + str(
                stats['uniqueSubreddits']) + ' server(s) - ' + str(
                round(stats['totalAsPercentage'], 3)) + '% of all requests'
        else:
            comment += ')'

        #----- EPISODE COUNTDOWN -----#
        if (countdown is not None) and (nextEpisode is not None):
            #countdown is given to us in seconds
            days, countdown = divmod(countdown, 24 * 60 * 60)
            hours, countdown = divmod(countdown, 60 * 60)
            minutes, countdown = divmod(countdown, 60)
            comment += '\n\n(Episode ' + str(nextEpisode) + ' airs in ' + str(
                days) + ' days, ' + str(hours) + ' hours, ' + str(
                minutes) + ' minutes)'

        #----- DESCRIPTION -----#
        if isExpanded:
            descComment += cleanupDescription(desc)

        #----- END -----#
        receipt = '(A) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if apURL is not None:
            receipt += 'AP '
        if ani is not None:
            receipt += 'AL '
        if anidbURL is not None:
            receipt += 'ADB '
        print(receipt.encode('utf8'))

        try:
            embed = buildEmbedObject(title, allLinks, comment, malimage,
                                     isExpanded, descComment)
        except Exception as e:
            # BUG FIX: previously fell through with `embed` unbound and
            # crashed into the outer handler via NameError; fail explicitly.
            print(e)
            return None

        # Title is returned separately so callers can deduplicate repeated
        # requests (e.g. {Nisekoi}{Nisekoi}{Nisekoi}).
        return {'title': title, 'embed': embed}
    except Exception as e:
        print(e)
        #traceback.print_exc()
        return None
def SavetoServerWorker(scripts, networkTransferWindow, sendWindowCloseSignal):
    """Upload all locally modified databases to the FTP server.

    Holds the global database lock for the entire operation. Uploads at
    most 10 files per pass, then recurses to handle the remainder.
    Returns True on full success, False otherwise.

    NOTE(review): the retry loops use range(1, 20), which yields 1..19, so
    every `>= 20` / `== 20` give-up branch in this function is dead code —
    on persistent failure the loops simply exhaust and fall through.
    Confirm whether range(1, 21) was intended.
    """
    Globals.Cache.databaseAccessRLock.acquire()
    scripts.WriteDatabaseStorageToHdd()
    if len(scripts.update) == 0:
        networkTransferWindow.addListEntry("Nothing to save!", "-")
        if sendWindowCloseSignal:
            networkTransferWindow.allowCloseSignal.emit(False)
        Globals.Cache.databaseAccessRLock.release()
        return False
    # Beginning Save...
    autoRestartAfter = False
    for ftperrorcount in range(1, 20):
        try:
            try:
                scripts.ftp = ConnectToFtp()
            except:
                # NOTE(review): dead branch, see docstring.
                if ftperrorcount >= 20:
                    networkTransferWindow.addListEntry("Couldn't connect to FTP Server, stopping upload. Please try again later.", "< Error >")
                    Globals.Settings.setValue('update', set(scripts.update))
                    if sendWindowCloseSignal:
                        networkTransferWindow.allowCloseSignal.emit(False)
                    Globals.Cache.databaseAccessRLock.release()
                    return False
                networkTransferWindow.addListEntry("Couldn't connect to FTP Server, retrying...", "< Error >")
                continue
            scripts.ftp.cwd('/')
            scripts.ftp.cwd(Globals.configData.RemoteDatabasePath)
            # Retrieving any files modified by others...
            RetrieveModifiedFilesWorker(scripts, None, networkTransferWindow, False)
            # Uploading Files...
            LogTable = []
            saveUpdate = set()
            # stagger upload into multiple 10-file batches
            # the way this is written we cannot keep it, but eh
            singleFileUploadCounter = 0
            for filename in scripts.update:
                singleFileUploadCounter = singleFileUploadCounter + 1
                if singleFileUploadCounter > 10:
                    # Defer everything past the first 10 files; handled by
                    # the recursive call at the bottom (autoRestartAfter).
                    autoRestartAfter = True
                    saveUpdate.add(filename)
                    continue
                # 'Uploading ' + Globals.GetDatabaseDescriptionString(filename) + ' [' + filename + ']...'
                transferWindowIdx = networkTransferWindow.addListEntry("Downloading...", filename)
                # Downloading the server version and double checking
                DownloadFile(scripts, scripts.ftp, str(filename), 'temp')
                try:
                    # Merge the server's copy into ours before uploading so
                    # concurrent edits by others are not clobbered.
                    networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Merging...")
                    RemoteMergeCon = DatabaseHandler.OpenEntryDatabase('temp')
                    DatabaseHandler.MergeDatabaseWithServerVersionBeforeUpload(
                        DatabaseHandler.OpenEntryDatabase(filename).cursor(),
                        RemoteMergeCon.cursor()
                    )
                    RemoteMergeCon.commit()
                    networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Uploading...")
                    for ftpSingleFileUpErrorCount in range(1, 20):
                        try:
                            # NOTE(review): dead branch, see docstring.
                            if ftpSingleFileUpErrorCount >= 20:
                                networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "!! Error !! Server file may be corrupted, please manually check and fix or inform someone who can.")
                                if sendWindowCloseSignal:
                                    networkTransferWindow.allowCloseSignal.emit(False)
                                Globals.Cache.databaseAccessRLock.release()
                                return False
                            result = UploadFile(scripts, scripts.ftp, 'temp', str(filename))
                            # A string result signals a retryable failure.
                            if isinstance(result, str):
                                continue
                            break
                        except ftplib.all_errors:
                            networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Error, retrying... (" + str(ftpSingleFileUpErrorCount) + ")")
                            continue
                    # And copy the new remote over the old local
                    Globals.CopyFile(Globals.configData.LocalDatabasePath + '/temp',
                                     Globals.configData.LocalDatabasePath + '/{0}'.format(filename))
                except:
                    # The downloaded server copy could not be opened/merged;
                    # assume it is corrupt and push our local file as-is.
                    networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Server file corrupted, replacing with local file...")
                    UploadFile(scripts, scripts.ftp, filename, filename)
                LogTable.append(filename)
                networkTransferWindow.modifyListEntryStatus(transferWindowIdx, "Complete!")
                CompletionTable.CalculateCompletionForDatabase(filename)
                Globals.Cache.LoadDatabase(filename)
            # Fix up the changelog and upload
            transferWindowChangeLogIdx = networkTransferWindow.addListEntry("Modifying...", "ChangeLog")
            ChangeLogConnection, ChangeLogCursor = Globals.GetNewChangeLogConnectionAndCursor()
            ChangeLogCursor.execute('SELECT Max(ID) as Highest FROM Log')
            MaxID = ChangeLogCursor.fetchall()[0][0]
            # Comma-joined list of uploaded files ([:-1] drops the trailing comma).
            fileString = ''.join(["%s," % (k) for k in LogTable])[:-1]
            # 'Uploaded: ', fileString
            ChangeLogCursor.execute(u"INSERT INTO Log VALUES({0}, '{1}', '{2}', {3})".format(MaxID + 1, fileString, Globals.Author, "strftime('%s','now')"))
            ChangeLogConnection.commit()
            ChangeLogConnection.close()
            networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "Uploading...")
            changeLogUploadSuccess = False
            for changeup in range(1, 20):
                try:
                    result = UploadFile(scripts, scripts.ftp, 'ChangeLog', 'ChangeLog', False)
                    if isinstance(result, str) or not result:
                        # NOTE(review): `changeup >= 20` is dead, see docstring.
                        if changeup >= 20:
                            break
                        else:
                            networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "Error, retrying... (" + str(changeup) + ")")
                            continue
                    networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "Complete!")
                    changeLogUploadSuccess = True
                    break
                except ftplib.all_errors:
                    if changeup >= 20:
                        break
                    networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "Error, retrying... (" + str(changeup) + ")")
                    continue
            if not changeLogUploadSuccess:
                networkTransferWindow.modifyListEntryStatus(transferWindowChangeLogIdx, "!! Error !! Server ChangeLog may be corrupted, please fix immediately.")
                if sendWindowCloseSignal:
                    networkTransferWindow.allowCloseSignal.emit(False)
                Globals.Cache.databaseAccessRLock.release()
                return False
            # Everything is done.
            scripts.ftp.close()
            if len(saveUpdate) > 0:
                Globals.MainWindow.displayStatusMessage(
                    'Retaining the following files for later upload: ' + str(saveUpdate)
                )
            # Whatever was deferred becomes the new pending-update set.
            scripts.update.clear()
            scripts.update = set(saveUpdate)
            Globals.Settings.setValue('update', scripts.update)
            Globals.Settings.sync()
            if autoRestartAfter:
                # Recurse to upload the next 10-file batch; the RLock is
                # re-entrant so the nested acquire is safe.
                retval = SavetoServerWorker(scripts, networkTransferWindow, sendWindowCloseSignal)
                Globals.Cache.databaseAccessRLock.release()
                return retval
            if len(scripts.update) > 0:
                Globals.HaveUnsavedChanges = True
            else:
                Globals.HaveUnsavedChanges = False
            scripts.SetWindowTitle()
            if sendWindowCloseSignal:
                networkTransferWindow.allowCloseSignal.emit(True)
            Globals.Cache.databaseAccessRLock.release()
            return True
        except ftplib.all_errors:
            if ftperrorcount >= 20:
                networkTransferWindow.addListEntry("Error during FTP transfer. File(s) that were in progress may be corrupted, please confirm and fix.", "< Error >")
                break
            networkTransferWindow.addListEntry("Error during FTP transfer, retrying...", "< Error >")
            continue
    if sendWindowCloseSignal:
        networkTransferWindow.allowCloseSignal.emit(False)
    Globals.Cache.databaseAccessRLock.release()
    return False
def loadDB(self, dbConfigName):
    """Connect ``self.db`` to the database configuration named ``dbConfigName``.

    Logs a warning and leaves ``self.db`` untouched when the configuration
    name is not present in ``self.cfg.databases``.
    """
    # Membership test written idiomatically (`not in` rather than `not ... in`).
    if dbConfigName not in self.cfg.databases:
        logging.warning('Database configuration ({0}) does not exist'.format(dbConfigName))
        return
    self.db = DatabaseHandler.createDBconnection(self.cfg.databases[dbConfigName])
def RevertFromServerWorker(scripts, networkTransferWindow, sendWindowCloseSignal):
    """Discard local modifications by re-downloading every pending file
    from the FTP server, then clear the pending-update set.

    NOTE(review): the retry loop uses range(1, 20), which yields 1..19, so
    both `i == 20` failure branches below are dead code — on persistent
    failure the loop exhausts silently. Confirm whether range(1, 21) was
    intended.
    """
    Globals.Cache.databaseAccessRLock.acquire()
    scripts.WriteDatabaseStorageToHdd()
    updateList = list(scripts.update)
    for i in range(1, 20):
        try:
            try:
                scripts.ftp = ConnectToFtp()
            except:
                # NOTE(review): dead branch, see docstring.
                if i == 20:
                    # NOTE(review): bare `print` looks like a leftover debug
                    # statement (or a Python-2 print remnant) — confirm.
                    print
                    networkTransferWindow.addListEntry("FTP connection failed, revert didn't succeed.", "< Error >")
                    Globals.Settings.setValue('update', set(updateList))
                    Globals.Settings.sync()
                    Globals.Cache.databaseAccessRLock.release()
                    if sendWindowCloseSignal:
                        networkTransferWindow.allowCloseSignal.emit(False)
                    return
                networkTransferWindow.addListEntry('Error during FTP connect, retrying...', "< Error >")
                continue
            scripts.ftp.cwd('/')
            scripts.ftp.cwd(Globals.configData.RemoteDatabasePath)
            # "Re-getting changed files from server..."
            # Items are removed from the tail only after a successful
            # download, so a mid-loop FTP error resumes where it left off.
            while len(updateList) > 0:
                item = updateList[len(updateList) - 1]
                transferWindowIndex = networkTransferWindow.addListEntry("Downloading...", item)
                DownloadFile(scripts, scripts.ftp, item, item)
                # Clear the local "updated" flags on the freshly
                # downloaded copy.
                WipeUpdateCon = DatabaseHandler.OpenEntryDatabase(item)
                WipeUpdateCur = WipeUpdateCon.cursor()
                WipeUpdateCur.execute(u"UPDATE Text SET updated=0")
                WipeUpdateCon.commit()
                CompletionTable.CalculateCompletionForDatabase(item)
                Globals.Cache.LoadDatabase(item)
                networkTransferWindow.modifyListEntryStatus(transferWindowIndex, "Complete!")
                updateList.remove(item)
            scripts.ftp.close()
            scripts.update.clear()
            Globals.Settings.setValue('update', scripts.update)
            Globals.Settings.sync()
            Globals.HaveUnsavedChanges = False
            scripts.SetWindowTitle()
            break
        except ftplib.all_errors:
            # NOTE(review): dead branch, see docstring.
            if i == 20:
                networkTransferWindow.addListEntry('Error during FTP transfer. Databases may be corrupted, please Revert again as soon as possible.', "< Error >")
                if sendWindowCloseSignal:
                    networkTransferWindow.allowCloseSignal.emit(False)
                break
            networkTransferWindow.addListEntry('Error during FTP transfer, retrying...', "< Error >")
            continue
    Globals.Cache.databaseAccessRLock.release()
    if sendWindowCloseSignal:
        networkTransferWindow.allowCloseSignal.emit(True)
    return
def buildAnimeReply(searchText, isExpanded, baseComment):
    """Look up an anime across Anilist/Hummingbird/MAL and build a reply.

    Basic breakdown:
    - If Anilist finds something, use it to find the Hummingbird (HB) entry.
    - Otherwise try HB first and use it to "refind" the Anilist entry.
    - If we hit HB we don't need to look for MAL separately; if we don't,
      find MAL on its own and use it to backfill HB/Anilist.
    - If we ended up with an Anilist entry, upgrade it to the full record.
    - If anything hit, log the request in the tracking database.

    Returns the built comment, or None when nothing was found or an error
    occurred.
    """
    try:
        ani = Anilist.getAnimeDetails(searchText)
        hb = None
        mal = None

        if ani is not None:
            hb = Hummingbird.getAnimeDetails(ani['title_romaji'])
            if hb is None:
                for synonym in ani['synonyms']:
                    hb = Hummingbird.getAnimeDetails(synonym)
                    if hb is not None:
                        break
                # BUG FIX: only fall back to the English title when no
                # synonym matched; previously this lookup ran
                # unconditionally and clobbered a successful synonym hit.
                if hb is None:
                    hb = Hummingbird.getAnimeDetails(ani['title_english'])
        else:
            hb = Hummingbird.getAnimeDetails(searchText)
            if hb is not None:
                ani = Anilist.getAnimeDetails(hb['title'])

        if hb is None:
            mal = MAL.getAnimeDetails(searchText)
            if mal is not None:
                hb = Hummingbird.getAnimeDetails(mal['title'])
                if hb is None:
                    hb = Hummingbird.getAnimeDetails(mal['english'])
                if ani is None:
                    ani = Anilist.getAnimeDetails(mal['title'])
                    if ani is None:
                        ani = Anilist.getAnimeDetails(mal['english'])

        # Upgrade to the full Anilist record when available.
        try:
            if ani is not None:
                aniFull = Anilist.getFullAnimeDetails(ani['id'])
                if aniFull is not None:
                    ani = aniFull
        except Exception:
            pass

        if (ani is not None) or (hb is not None) or (mal is not None):
            try:
                if ani is None:
                    titleToAdd = hb['title']
                elif hb is not None:
                    titleToAdd = ani['title_romaji']
                else:
                    titleToAdd = mal['title']
                # BUG FIX: the original compared the *bound method*
                # `str.lower` against a string with `is not`, which is
                # always True; call lower() and compare with != so the
                # bot's own subreddits really are excluded from tracking.
                subredditName = str(baseComment.subreddit).lower()
                if subredditName != 'nihilate' and subredditName != 'roboragi':
                    DatabaseHandler.addRequest(titleToAdd, 'Anime',
                                               baseComment.author.name,
                                               baseComment.subreddit)
            except Exception:
                # Tracking is best-effort; never fail the reply over it.
                traceback.print_exc()

            if ani is not None:
                # Never post adult entries.
                if ani['adult'] is True:
                    print("NSFW ENTRY")
                    mal = None
                    hb = None
                    ani = None

        return CommentBuilder.buildAnimeComment(isExpanded, mal, hb, ani)
    except Exception:
        traceback.print_exc()
        return None
def process_comment(comment, is_edit=False):
    """Parse a Reddit comment for {anime}, <manga> and ]LN[ requests and reply.

    :param comment: praw comment object; ``comment.body`` is mutated (code
        spans between backticks are stripped).
    :param is_edit: when True, return the built reply text instead of
        posting it (and return None when there is nothing to say).
    """
    #Anime/Manga requests that are found go into separate arrays
    animeArray = []
    mangaArray = []
    lnArray = []

    #ignores all "code" markup (i.e. anything between backticks)
    comment.body = re.sub(r"\`(?s)(.*?)\`", "", comment.body)

    #This checks for requests. First up we check all known tags for the !stats request
    if re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
                 comment.body, re.S) is not None:
        username = re.search('[uU]\/([A-Za-z0-9_-]+?)(>|}|$)', comment.body, re.S)
        subreddit = re.search('[rR]\/([A-Za-z0-9_]+?)(>|}|$)', comment.body, re.S)
        if username:
            commentReply = CommentBuilder.buildStatsComment(
                username=username.group(1))
        elif subreddit:
            commentReply = CommentBuilder.buildStatsComment(
                subreddit=subreddit.group(1))
        else:
            commentReply = CommentBuilder.buildStatsComment()
    else:
        #The basic algorithm here is:
        #If it's an expanded request, build a reply using the data in the braces, clear the arrays, add the reply to the relevant array and ignore everything else.
        #If it's a normal request, build a reply using the data in the braces, add the reply to the relevant array.

        #Counts the number of expanded results vs total results. If it's not just a single expanded result, they all get turned into normal requests.
        numOfRequest = 0
        numOfExpandedRequest = 0
        forceNormal = False
        for match in re.finditer("\{{2}([^}]*)\}{2}|\<{2}([^>]*)\>{2}",
                                 comment.body, re.S):
            numOfRequest += 1
            numOfExpandedRequest += 1
        for match in re.finditer(
                "(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))|(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))",
                comment.body, re.S):
            numOfRequest += 1
        if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
            forceNormal = True

        #Expanded Anime
        for match in re.finditer("\{{2}([^}]*)\}{2}", comment.body, re.S):
            if forceNormal or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildAnimeReply(match.group(1), False, comment)
            else:
                reply = Search.buildAnimeReply(match.group(1), True, comment)
            if reply is not None:
                animeArray.append(reply)

        #Normal Anime
        for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))",
                                 comment.body, re.S):
            reply = Search.buildAnimeReply(match.group(1), False, comment)
            if reply is not None:
                animeArray.append(reply)

        #Expanded Manga
        #NORMAL EXPANDED
        for match in re.finditer("\<{2}([^>]*)\>{2}(?!(:|\>))",
                                 comment.body, re.S):
            if forceNormal or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildMangaReply(match.group(1), False, comment)
            else:
                reply = Search.buildMangaReply(match.group(1), True, comment)
            if reply is not None:
                mangaArray.append(reply)

        #AUTHOR SEARCH EXPANDED
        for match in re.finditer("\<{2}([^>]*)\>{2}:\(([^)]+)\)",
                                 comment.body, re.S):
            if forceNormal or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildMangaReplyWithAuthor(
                    match.group(1), match.group(2), False, comment)
            else:
                reply = Search.buildMangaReplyWithAuthor(
                    match.group(1), match.group(2), True, comment)
            if reply is not None:
                mangaArray.append(reply)

        #Normal Manga
        #NORMAL
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))",
                                 comment.body, re.S):
            reply = Search.buildMangaReply(match.group(1), False, comment)
            if reply is not None:
                mangaArray.append(reply)

        #AUTHOR SEARCH
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]*)\>:\(([^)]+)\)",
                                 comment.body, re.S):
            reply = Search.buildMangaReplyWithAuthor(match.group(1),
                                                     match.group(2),
                                                     False, comment)
            if reply is not None:
                mangaArray.append(reply)

        #Expanded LN
        for match in re.finditer("\]{2}([^]]*)\[{2}", comment.body, re.S):
            if forceNormal or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildLightNovelReply(match.group(1), False, comment)
            else:
                reply = Search.buildLightNovelReply(match.group(1), True, comment)
            if reply is not None:
                lnArray.append(reply)

        #Normal LN
        for match in re.finditer("(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))",
                                 comment.body, re.S):
            reply = Search.buildLightNovelReply(match.group(1), False, comment)
            if reply is not None:
                lnArray.append(reply)

        #Here is where we create the final reply to be posted

        #The final comment reply. We add stuff to this progressively.
        commentReply = ''

        #Basically just to keep track of people posting the same title multiple times (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []

        #Adding all the anime to the final comment. If there's manga too we split up all the paragraphs and indent them in Reddit markup by adding a '>', then recombine them
        # BUG FIX: loop-index checks now use `!= 0` instead of `is 0`
        # (identity comparison with an int literal is unreliable).
        for i, animeReply in enumerate(animeArray):
            if i != 0:
                commentReply += '\n\n'
            if animeReply['title'] not in postedAnimeTitles:
                postedAnimeTitles.append(animeReply['title'])
                commentReply += animeReply['comment']

        if mangaArray:
            commentReply += '\n\n'

        #Adding all the manga to the final comment
        for i, mangaReply in enumerate(mangaArray):
            if i != 0:
                commentReply += '\n\n'
            if mangaReply['title'] not in postedMangaTitles:
                postedMangaTitles.append(mangaReply['title'])
                commentReply += mangaReply['comment']

        if lnArray:
            commentReply += '\n\n'

        #Adding all the light novels to the final comment
        for i, lnReply in enumerate(lnArray):
            if i != 0:
                commentReply += '\n\n'
            if lnReply['title'] not in postedLNTitles:
                postedLNTitles.append(lnReply['title'])
                commentReply += lnReply['comment']

        #If there are more than 10 requests, shorten them all
        # BUG FIX: string comparison via `is ''` replaced with `!= ''`.
        if commentReply != '' and (
                len(animeArray) + len(mangaArray) + len(lnArray) >= 10):
            commentReply = re.sub(r"\^\((.*?)\)", "", commentReply, flags=re.M)

    #If there was actually something found, add the signature and post the comment to Reddit. Then, add the comment to the "already seen" database.
    if commentReply != '':
        commentReply += Config.getSignature(comment.permalink)
        commentReply += Reference.get_bling(comment.author.name)
        if is_edit:
            return commentReply
        else:
            try:
                comment.reply(commentReply)
                print("Comment made.\n")
            except praw.errors.Forbidden:
                print('Request from banned subreddit: '
                      + str(comment.subreddit) + '\n')
            except Exception:
                traceback.print_exc()
            try:
                DatabaseHandler.addComment(comment.id, comment.author.name,
                                           comment.subreddit, True)
            except Exception:
                traceback.print_exc()
    else:
        try:
            if is_edit:
                return None
            else:
                DatabaseHandler.addComment(comment.id, comment.author.name,
                                           comment.subreddit, False)
        except Exception:
            traceback.print_exc()
def syncSQLtoMemory():
    """Rebuild the in-memory ``tasks`` list from the persisted task rows.

    Only the first three columns of each stored row are used to construct
    a Task.  # assumes column order matches Task(...) args — TODO confirm
    """
    global tasks
    # Build the full list first, then rebind, so `tasks` is never observed
    # half-populated (and a failed load leaves the old list intact).
    tasks = [Task(row[0], row[1], row[2]) for row in db.getAllTasks()]
def process_comment(comment, is_edit=False):
    """Parse a Reddit comment for {anime}, <manga> and ]LN[ requests and reply.

    NOTE(review): this appears to be a second definition of
    ``process_comment`` in the same module; if so, it shadows the earlier
    one — confirm which is intended to be live.

    :param comment: praw comment object; ``comment.body`` is mutated (code
        spans between backticks are stripped).
    :param is_edit: when True, return the built reply text instead of
        posting it (None when there is nothing to say).
    """
    #Anime/Manga requests that are found go into separate arrays
    animeArray = []
    mangaArray = []
    lnArray = []

    #ignores all "code" markup (i.e. anything between backticks)
    comment.body = re.sub(r"\`(?s)(.*?)\`", "", comment.body)

    #This checks for requests. First up we check all known tags for the !stats request
    if re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)', comment.body, re.S) is not None:
        username = re.search('[uU]\/([A-Za-z0-9_-]+?)(>|}|$)', comment.body, re.S)
        subreddit = re.search('[rR]\/([A-Za-z0-9_]+?)(>|}|$)', comment.body, re.S)
        if username:
            commentReply = CommentBuilder.buildStatsComment(username=username.group(1))
        elif subreddit:
            commentReply = CommentBuilder.buildStatsComment(subreddit=subreddit.group(1))
        else:
            commentReply = CommentBuilder.buildStatsComment()
    else:
        #The basic algorithm here is:
        #If it's an expanded request, build a reply using the data in the braces, clear the arrays, add the reply to the relevant array and ignore everything else.
        #If it's a normal request, build a reply using the data in the braces, add the reply to the relevant array.

        #Counts the number of expanded results vs total results. If it's not just a single expanded result, they all get turned into normal requests.
        numOfRequest = 0
        numOfExpandedRequest = 0
        forceNormal = False
        for match in re.finditer("\{{2}([^}]*)\}{2}|\<{2}([^>]*)\>{2}", comment.body, re.S):
            numOfRequest += 1
            numOfExpandedRequest += 1
        for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))|(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))", comment.body, re.S):
            numOfRequest += 1
        if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
            forceNormal = True

        #Expanded Anime
        for match in re.finditer("\{{2}([^}]*)\}{2}", comment.body, re.S):
            reply = ''
            if (forceNormal) or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildAnimeReply(match.group(1), False, comment)
            else:
                reply = Search.buildAnimeReply(match.group(1), True, comment)
            if (reply is not None):
                animeArray.append(reply)

        #Normal Anime
        for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))", comment.body, re.S):
            reply = Search.buildAnimeReply(match.group(1), False, comment)
            if (reply is not None):
                animeArray.append(reply)

        #Expanded Manga
        #NORMAL EXPANDED
        for match in re.finditer("\<{2}([^>]*)\>{2}(?!(:|\>))", comment.body, re.S):
            reply = ''
            if (forceNormal) or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildMangaReply(match.group(1), False, comment)
            else:
                reply = Search.buildMangaReply(match.group(1), True, comment)
            if (reply is not None):
                mangaArray.append(reply)

        #AUTHOR SEARCH EXPANDED
        for match in re.finditer("\<{2}([^>]*)\>{2}:\(([^)]+)\)", comment.body, re.S):
            reply = ''
            if (forceNormal) or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildMangaReplyWithAuthor(match.group(1), match.group(2), False, comment)
            else:
                reply = Search.buildMangaReplyWithAuthor(match.group(1), match.group(2), True, comment)
            if (reply is not None):
                mangaArray.append(reply)

        #Normal Manga
        #NORMAL
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))", comment.body, re.S):
            reply = Search.buildMangaReply(match.group(1), False, comment)
            if (reply is not None):
                mangaArray.append(reply)

        #AUTHOR SEARCH
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]*)\>:\(([^)]+)\)", comment.body, re.S):
            reply = Search.buildMangaReplyWithAuthor(match.group(1), match.group(2), False, comment)
            if (reply is not None):
                mangaArray.append(reply)

        #Expanded LN
        for match in re.finditer("\]{2}([^]]*)\[{2}", comment.body, re.S):
            reply = ''
            if (forceNormal) or (str(comment.subreddit).lower() in disableexpanded):
                reply = Search.buildLightNovelReply(match.group(1), False, comment)
            else:
                reply = Search.buildLightNovelReply(match.group(1), True, comment)
            if (reply is not None):
                lnArray.append(reply)

        #Normal LN
        for match in re.finditer("(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))", comment.body, re.S):
            reply = Search.buildLightNovelReply(match.group(1), False, comment)
            if (reply is not None):
                lnArray.append(reply)

        #Here is where we create the final reply to be posted

        #The final comment reply. We add stuff to this progressively.
        commentReply = ''

        #Basically just to keep track of people posting the same title multiple times (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []

        #Adding all the anime to the final comment. If there's manga too we split up all the paragraphs and indent them in Reddit markup by adding a '>', then recombine them
        # NOTE(review): `i is 0` / `is ''` below are identity comparisons
        # with literals — fragile, should be == / != (left unchanged here).
        for i, animeReply in enumerate(animeArray):
            if not (i is 0):
                commentReply += '\n\n'
            if not (animeReply['title'] in postedAnimeTitles):
                postedAnimeTitles.append(animeReply['title'])
                commentReply += animeReply['comment']

        if mangaArray:
            commentReply += '\n\n'

        #Adding all the manga to the final comment
        for i, mangaReply in enumerate(mangaArray):
            if not (i is 0):
                commentReply += '\n\n'
            if not (mangaReply['title'] in postedMangaTitles):
                postedMangaTitles.append(mangaReply['title'])
                commentReply += mangaReply['comment']

        if lnArray:
            commentReply += '\n\n'

        #Adding all the manga to the final comment
        for i, lnReply in enumerate(lnArray):
            if not (i is 0):
                commentReply += '\n\n'
            if not (lnReply['title'] in postedLNTitles):
                postedLNTitles.append(lnReply['title'])
                commentReply += lnReply['comment']

        #If there are more than 10 requests, shorten them all
        if not (commentReply is '') and (len(animeArray) + len(mangaArray)+ len(lnArray) >= 10):
            commentReply = re.sub(r"\^\((.*?)\)", "", commentReply, flags=re.M)

    #If there was actually something found, add the signature and post the comment to Reddit. Then, add the comment to the "already seen" database.
    if commentReply is not '':
        '''if (comment.author.name == 'treborabc'):
            commentReply = '[No.](https://www.reddit.com/r/anime_irl/comments/4sba1n/anime_irl/d58xkha)'''
        commentReply += Config.getSignature(comment.permalink)
        commentReply += Reference.get_bling(comment.author.name)
        if is_edit:
            return commentReply
        else:
            try:
                comment.reply(commentReply)
                print("Comment made.\n")
            except praw.errors.Forbidden:
                print('Request from banned subreddit: ' + str(comment.subreddit) + '\n')
            except Exception:
                traceback.print_exc()
            try:
                DatabaseHandler.addComment(comment.id, comment.author.name, comment.subreddit, True)
            except:
                traceback.print_exc()
    else:
        try:
            if is_edit:
                return None
            else:
                DatabaseHandler.addComment(comment.id, comment.author.name, comment.subreddit, False)
        except:
            traceback.print_exc()
def buildVisualNovelComment(isExpanded, vndb):
    """Build the reply text for a visual-novel request.

    :param isExpanded: True for the long-form reply with stats/description.
    :param vndb: VNDB record dict with 'title', 'url', 'wikipedia_url',
        'release_year', 'length' and 'description' keys.
    :return: dict with 'title' and 'comment' keys, or None on failure.
    """
    try:
        comment = ''
        urls = []
        if vndb['url']:
            urls.append('[VNDB]({0})'.format(vndb['url']))
        if vndb['wikipedia_url']:
            # Escape a trailing ')' so it cannot close the Markdown link early.
            if vndb['wikipedia_url'].endswith(')'):
                formatted_wiki_url = vndb['wikipedia_url'][:-1] + '\)'
            else:
                formatted_wiki_url = vndb['wikipedia_url']
            urls.append('[Wiki]({0})'.format(formatted_wiki_url))
        # Idiom fix: join instead of a manual loop with an `i is not 0`
        # identity comparison.
        formatted_links = ', '.join(urls)

        if not isExpanded:
            template = '**{title}** - {links}\n\n^({type}{released}{length})'
            comment = template.format(
                title=vndb['title'],
                links='({})'.format(formatted_links),
                type='VN',
                released=' | Released: ' + vndb['release_year'] if vndb['release_year'] else '',
                length=' | Length: ' + vndb['length'] if vndb['length'] else '')
        else:
            stats = DatabaseHandler.getRequestStats(vndb['title'], 'VN')
            if stats:
                formatted_stats = '**Stats:** ' + str(
                    stats['total']) + ' requests across ' + str(
                    stats['uniqueSubreddits']
                    ) + ' subreddit(s)^) ^- ^' + str(
                    round(stats['totalAsPercentage'], 3)) + '% ^of ^all ^requests'
            else:
                formatted_stats = None
            template = '**{title}** - {links}\n\n^({type}{released}{length}){stats}\n\n{desc}'
            comment = template.format(
                title=vndb['title'],
                links='({})'.format(formatted_links),
                type='**VN**',
                released=' | **Released:** ' + vndb['release_year'] if vndb['release_year'] else '',
                length=' | **Length:** ' + vndb['length'] if vndb['length'] else '',
                stats=' \n^(' + formatted_stats if formatted_stats else '',
                desc=cleanupDescription(vndb['description']))

        # ----- END -----#
        receipt = '(VN) Request successful: ' + vndb['title'] + ' - '
        if vndb:
            receipt += 'VNDB'
        print(receipt)

        # We return the title/comment separately so we can track if multiples
        # of the same comment have been requested (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        return {'title': vndb['title'], 'comment': comment}
    except Exception:
        traceback.print_exc()
        return None
else: self.disableBtns(self.b0, self.b1, self.b2, self.b3, self.b4) threading.Timer(1.0, self.checkIfLoggedIn).start() def enableBtns(self, *btns): for btn in btns: btn['state'] = tk.NORMAL def disableBtns(self, *btns): for btn in btns: btn['state'] = tk.DISABLED if __name__ == "__main__": databaseHandler = DatabaseHandler() databaseHandler.initAll() root = tk.Tk() main = MainView(root) main.pack(side="top", fill="both", expand=True) root.title("Movie Tracker") root.geometry("1150x500") warning = tk.Label( root, text= "If you want to create and manage your lists of movies, you have to register and log in first!" ) warning.config(font=("Courier", 12)) warning.pack() main.checkIfLoggedIn()
async def process_message(message, is_edit=False):
    """Scan a Discord message for {anime}, <manga> and ]light novel[ requests,
    build one combined reply and send it to the message's channel.

    :param message: a discord Message object.
    :param is_edit: True when re-processing an edited message; skips the
        "already seen" bookkeeping in that case.
    """
    # Anime/Manga requests that are found go into separate arrays
    animeArray = []
    mangaArray = []
    lnArray = []

    # Ignore all "code" markup (i.e. anything between backticks).
    cleanMessage = re.sub(r"\`(?s)(.*?)\`", "", message.clean_content)
    sender = re.search('[@]([A-Za-z0-9_-]+?)(>|}|$)', cleanMessage, re.S)

    # FIX: the user-stats branch used to be a stand-alone `if`, so its reply
    # was immediately overwritten by the generic `elif {!stats}` branch below.
    # It is now the first arm of a single if/elif chain.
    if re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
                 cleanMessage, re.S) is not None and sender is not None:
        messageReply = CommentBuilder.buildStatsComment(
            username=sender.group(1))
    elif re.search('({!sstats.*?}|{{!sstats.*?}}|<!sstats.*?>|<<!sstats.*?>>)',
                   cleanMessage, re.S) is not None:
        server = re.search('([A-Za-z0-9_]+?)(>|}|$)', cleanMessage, re.S)
        messageReply = CommentBuilder.buildStatsComment(server=server.group(1))
    elif re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
                   cleanMessage, re.S) is not None:
        messageReply = CommentBuilder.buildStatsComment()
    else:
        # The basic algorithm here is:
        # - an expanded request replaces everything else in the message;
        # - a normal request is appended to the relevant array.
        # Count expanded vs. total requests: unless the message is exactly one
        # expanded request, every request is demoted to a normal one.
        numOfRequest = 0
        numOfExpandedRequest = 0
        forceNormal = False

        for match in re.finditer("\{{2}([^}]*)\}{2}|\<{2}([^>]*)\>{2}",
                                 cleanMessage, re.S):
            numOfRequest += 1
            numOfExpandedRequest += 1
        for match in re.finditer(
                "(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))|(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))",
                cleanMessage, re.S):
            numOfRequest += 1

        if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
            forceNormal = True

        # Expanded anime: {{Title}}
        for match in re.finditer("\{{2}([^}]*)\}{2}", cleanMessage, re.S):
            isExpanded = not (forceNormal or
                              str(message.channel).lower() in disableexpanded)
            reply = DiscordoragiSearch.buildAnimeReply(match.group(1), message,
                                                       isExpanded)
            if reply is not None:
                animeArray.append(reply)

        # Normal anime: {Title}
        for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))",
                                 cleanMessage, re.S):
            reply = DiscordoragiSearch.buildAnimeReply(match.group(1), message,
                                                       False)
            if reply is not None:
                animeArray.append(reply)

        # Expanded manga: <<Title>>
        for match in re.finditer("\<{2}([^>]*)\>{2}(?!(:|\>))", cleanMessage,
                                 re.S):
            isExpanded = not (forceNormal or
                              str(message.channel).lower() in disableexpanded)
            reply = DiscordoragiSearch.buildMangaReply(match.group(1), message,
                                                       isExpanded)
            if reply is not None:
                mangaArray.append(reply)

        # Expanded manga with author: <<Title>>:(Author)
        # NOTE(review): this branch checks message.server against
        # disableexpanded while the branches above check message.channel —
        # preserved as found; confirm which is intended.
        for match in re.finditer("\<{2}([^>]*)\>{2}:\(([^)]+)\)",
                                 cleanMessage, re.S):
            isExpanded = not (forceNormal or
                              str(message.server).lower() in disableexpanded)
            reply = DiscordoragiSearch.buildMangaReplyWithAuthor(
                match.group(1), match.group(2), message, isExpanded)
            if reply is not None:
                mangaArray.append(reply)

        # Normal manga: <Title>
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))",
                                 cleanMessage, re.S):
            reply = DiscordoragiSearch.buildMangaReply(match.group(1), message,
                                                       False)
            if reply is not None:
                mangaArray.append(reply)

        # Normal manga with author: <Title>:(Author)
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]*)\>:\(([^)]+)\)",
                                 cleanMessage, re.S):
            reply = DiscordoragiSearch.buildMangaReplyWithAuthor(
                match.group(1), match.group(2), message, False)
            if reply is not None:
                mangaArray.append(reply)

        # Expanded light novels: ]]Title[[
        for match in re.finditer("\]{2}([^]]*)\[{2}", cleanMessage, re.S):
            isExpanded = not (forceNormal or
                              str(message.server).lower() in disableexpanded)
            reply = DiscordoragiSearch.buildLightNovelReply(match.group(1),
                                                            isExpanded, message)
            if reply is not None:
                lnArray.append(reply)

        # Normal light novels: ]Title[
        for match in re.finditer("(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))",
                                 cleanMessage, re.S):
            reply = DiscordoragiSearch.buildLightNovelReply(match.group(1),
                                                            False, message)
            if reply is not None:
                lnArray.append(reply)

        # --- assemble the final reply, built up progressively ---
        messageReply = ''

        # Track duplicate titles so {X}{X}{X} is only answered once.
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []

        for i, animeReply in enumerate(animeArray):
            if i != 0:
                messageReply += '\n\n'
            if animeReply['title'] not in postedAnimeTitles:
                postedAnimeTitles.append(animeReply['title'])
                messageReply += animeReply['comment']

        if mangaArray:
            messageReply += '\n\n'

        for i, mangaReply in enumerate(mangaArray):
            if i != 0:
                messageReply += '\n\n'
            if mangaReply['title'] not in postedMangaTitles:
                postedMangaTitles.append(mangaReply['title'])
                messageReply += mangaReply['comment']

        if lnArray:
            messageReply += '\n\n'

        for i, lnReply in enumerate(lnArray):
            if i != 0:
                # FIX: this used to append to the undefined name
                # `commentReply`, raising NameError on a second LN request.
                messageReply += '\n\n'
            if lnReply['title'] not in postedLNTitles:
                postedLNTitles.append(lnReply['title'])
                messageReply += lnReply['comment']

        # If there are 10 or more requests, strip the ^(...) info lines to
        # shorten the reply.
        if messageReply != '' and (len(animeArray) + len(mangaArray) >= 10):
            messageReply = re.sub(r"\^\((.*?)\)", "", messageReply, flags=re.M)

    # If something was found, sign and send the reply, then record the
    # message as handled.  (`is ''` identity checks replaced with `!= ''`.)
    if messageReply != '':
        messageReply += Config.getSignature()
        if is_edit:
            await Discord.client.send_message(message.channel, messageReply)
        else:
            try:
                print("Message created.\n")
                await Discord.client.send_message(message.channel, messageReply)
            except discord.errors.Forbidden:
                print('Request from banned channel: ' + str(message.channel) + '\n')
            except Exception:
                traceback.print_exc()
            try:
                DatabaseHandler.addMessage(message.id, message.author.id,
                                           message.server.id, True)
            except Exception:
                traceback.print_exc()
    else:
        try:
            if is_edit:
                return None
            else:
                DatabaseHandler.addMessage(message.id, message.author.id,
                                           message.server.id, False)
        except Exception:
            traceback.print_exc()
def start():
    """Open a praw comment stream and answer {anime}/<manga> requests forever.

    Loops until a major error escapes (usually the Reddit access token
    needing a refresh); the caller is expected to restart it.
    """
    print('Starting comment stream:')

    # This opens a constant stream of comments.
    comment_stream = praw.helpers.comment_stream(reddit, subredditlist,
                                                 limit=250, verbosity=0)

    for comment in comment_stream:
        # Skip comments made by the bot or already processed; record them as
        # seen (best-effort) and move on.
        if not Search.isValidComment(comment, reddit):
            try:
                if not DatabaseHandler.commentExists(comment.id):
                    DatabaseHandler.addComment(comment.id, comment.author.name,
                                               comment.subreddit, False)
            except Exception:
                # Deliberate best-effort: a DB hiccup must not stop the stream.
                pass
            continue

        # Anime/Manga requests that are found go into separate arrays.
        animeArray = []
        mangaArray = []

        # Only one expanded request is allowed per comment; once one is found
        # all further requests are ignored.
        hasExpandedRequest = False

        # Ignore all "code" markup (i.e. anything between backticks).
        comment.body = re.sub(r"\`(?s)(.*?)\`", "", comment.body)

        # !stats request — tag must start/end at whitespace or string edge.
        if re.search('(^|\s)({!stats}|{{!stats}}|<!stats>|<<!stats>>)($|\s|.)',
                     comment.body, re.S) is not None:
            commentReply = CommentBuilder.buildStatsComment(comment.subreddit)
        else:
            # An expanded request wipes the arrays and claims the whole reply;
            # normal requests accumulate (capped at 10 total).

            # Expanded anime: {{Title}}
            for match in re.finditer("\{{2}([^}]*)\}{2}", comment.body, re.S):
                if str(comment.subreddit).lower() not in disableexpanded:
                    if hasExpandedRequest:
                        break
                    reply = Search.buildAnimeReply(match.group(1), True, comment)
                    if reply is not None:
                        hasExpandedRequest = True
                        animeArray = [reply]
                        mangaArray = []
                else:
                    # Expanded requests disabled here: treat as normal.
                    if hasExpandedRequest:
                        break
                    reply = Search.buildAnimeReply(match.group(1), False, comment)
                    if (reply is not None) and (len(animeArray) + len(mangaArray) < 10):
                        animeArray.append(reply)

            # Normal anime: {Title}
            for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))",
                                     comment.body, re.S):
                if hasExpandedRequest:
                    break
                reply = Search.buildAnimeReply(match.group(1), False, comment)
                if (reply is not None) and (len(animeArray) + len(mangaArray) < 10):
                    animeArray.append(reply)

            # Expanded manga: <<Title>>
            for match in re.finditer("\<{2}([^>]*)\>{2}", comment.body, re.S):
                if str(comment.subreddit).lower() not in disableexpanded:
                    if hasExpandedRequest:
                        break
                    reply = Search.buildMangaReply(match.group(1), True, comment)
                    if reply is not None:
                        hasExpandedRequest = True
                        animeArray = []
                        mangaArray = [reply]
                else:
                    if hasExpandedRequest:
                        break
                    reply = Search.buildMangaReply(match.group(1), False, comment)
                    if (reply is not None) and (len(animeArray) + len(mangaArray) < 10):
                        mangaArray.append(reply)

            # Normal manga: <Title>
            for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))",
                                     comment.body, re.S):
                if hasExpandedRequest:
                    break
                reply = Search.buildMangaReply(match.group(1), False, comment)
                if (reply is not None) and (len(animeArray) + len(mangaArray) < 10):
                    mangaArray.append(reply)

            # --- assemble the final reply, built up progressively ---
            commentReply = ''

            # If we have anime AND manga in one reply we format things a
            # little differently (section headers + quoted blocks).
            multipleTypes = False

            # Track duplicate titles so {X}{X}{X} is only answered once.
            postedAnimeTitles = []
            postedMangaTitles = []

            if animeArray and mangaArray:
                multipleTypes = True
                commentReply += '**Anime**\n\n'

            # Anime section; when both types are present, indent each
            # paragraph with '>' (Reddit quote markup).
            for i, animeReply in enumerate(animeArray):
                if i != 0:
                    commentReply += '\n\n'
                if multipleTypes:
                    splitSections = re.split('\s{2,}', animeReply['comment'])
                    newSections = ['>' + section for section in splitSections]
                    animeReply['comment'] = '\n\n'.join(newSections)
                if animeReply['title'] not in postedAnimeTitles:
                    postedAnimeTitles.append(animeReply['title'])
                    commentReply += animeReply['comment']

            if multipleTypes:
                commentReply += '\n\n**Manga**\n\n'

            # Manga section.
            for i, mangaReply in enumerate(mangaArray):
                if i != 0:
                    commentReply += '\n\n'
                if multipleTypes:
                    splitSections = re.split('\s{2,}', mangaReply['comment'])
                    newSections = ['>' + section for section in splitSections]
                    mangaReply['comment'] = '\n\n'.join(newSections)
                if mangaReply['title'] not in postedMangaTitles:
                    postedMangaTitles.append(mangaReply['title'])
                    commentReply += mangaReply['comment']

        # If something was found, sign and post the comment, then record it
        # as answered.  (`is ''` identity check replaced with `!= ''`.)
        if commentReply != '':
            commentReply += '\n\n---\n\n [^How ^to ^use](http://www.reddit.com/r/Roboragi/wiki/index#wiki_how_do_i_use_it.3F) ^| ^[FAQ](http://www.reddit.com/r/Roboragi/wiki/index) ^| ^[Subreddit](http://www.reddit.com/r/Roboragi/) ^| ^[Issue/mistake?](http://www.reddit.com/r/Roboragi/submit?selftext=true&title=[ISSUE]&text=' + comment.permalink + ') ^| ^[Source](https://github.com/Nihilate/Roboragi)'
            try:
                comment.reply(commentReply)
                print("Comment made.\n")
                try:
                    DatabaseHandler.addComment(comment.id, comment.author.name,
                                               comment.subreddit, True)
                except Exception:
                    traceback.print_exc()
            except Exception:
                traceback.print_exc()
        else:
            try:
                DatabaseHandler.addComment(comment.id, comment.author.name,
                                           comment.subreddit, False)
            except Exception:
                traceback.print_exc()
"-s", "--save", action="store_true", help="Do you want to save the raw data to the mongodb or not?") parser.add_argument("--test", action="store_true") args = parser.parse_args() r_event = threading.Event() r_event.set() up_event = threading.Event() eventlist = [up_event] if args.test is not True: sonos, bulb = control_init() if args.save: save_mode = True rawDBHandler = db.DatabaseHandler() t1 = threading.Thread(target=dt.detect_status, args=(config['readerIP'], config['readerPort'], r_event, eventlist, rawDBHandler)) else: save_mode = False t1 = threading.Thread(target=dt.detect_status, args=( config['readerIP'], config['readerPort'], r_event, eventlist, )) resetThread = threading.Thread(target=dt.resetEPC, args=()) t1.start()
def buildAnimeReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look an anime up across MAL/Hummingbird/Anilist (+AniP/AniDB links) and
    build the reply comment.

    :param searchText: the title as requested by the user.
    :param isExpanded: expanded (detailed) vs. normal reply.
    :param baseComment: the praw comment being answered (used for tracking).
    :param blockTracking: when True, do not record the request in the DB.
    :returns: the built comment dict from CommentBuilder, or None on error.
    """
    try:
        # Each source is a search function + synonym expander + cached result.
        mal = {'search_function': MAL.getAnimeDetails,
               'synonym_function': MAL.getSynonyms,
               'checked_synonyms': [],
               'result': None}
        hb = {'search_function': Hummingbird.getAnimeDetails,
              'synonym_function': Hummingbird.getSynonyms,
              'checked_synonyms': [],
              'result': None}
        ani = {'search_function': Anilist.getAnimeDetails,
               'synonym_function': Anilist.getSynonyms,
               'checked_synonyms': [],
               'result': None}
        ap = {'search_function': AniP.getAnimeURL, 'result': None}
        adb = {'search_function': AniDB.getAnimeURL, 'result': None}

        # Check the synonym table first: a hit pins each source to a known ID.
        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "Anime" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()
        if alternateLinks:
            synonym = json.loads(alternateLinks[0])
            if synonym:
                malsyn = None
                if 'mal' in synonym and synonym['mal']:
                    malsyn = synonym['mal']
                hbsyn = None
                if 'hb' in synonym and synonym['hb']:
                    hbsyn = synonym['hb']
                anisyn = None
                if 'ani' in synonym and synonym['ani']:
                    anisyn = synonym['ani']
                apsyn = None
                if 'ap' in synonym and synonym['ap']:
                    apsyn = synonym['ap']
                adbsyn = None
                if 'adb' in synonym and synonym['adb']:
                    adbsyn = synonym['adb']

                mal['result'] = MAL.getAnimeDetails(malsyn[0], malsyn[1]) if malsyn else None
                hb['result'] = Hummingbird.getAnimeDetailsById(hbsyn) if hbsyn else None
                ani['result'] = Anilist.getAnimeDetailsById(anisyn) if anisyn else None
                ap['result'] = AniP.getAnimeURLById(apsyn) if apsyn else None
                adb['result'] = AniDB.getAnimeURLById(adbsyn) if adbsyn else None
        else:
            # No synonym entry: search each source, feeding every source's
            # synonym list back into the pool until results stabilise.
            data_sources = [ani, hb, mal]
            aux_sources = [ap, adb]
            synonyms = set([searchText])

            for x in range(len(data_sources)):
                for source in data_sources:
                    if source['result']:
                        break
                    else:
                        for synonym in synonyms:
                            if synonym in source['checked_synonyms']:
                                continue
                            source['result'] = source['search_function'](synonym)
                            source['checked_synonyms'].append(synonym)
                            if source['result']:
                                break
                    if source['result']:
                        synonyms.update(
                            [synonym.lower() for synonym in
                             source['synonym_function'](source['result'])])

            for source in aux_sources:
                for synonym in synonyms:
                    source['result'] = source['search_function'](synonym)
                    if source['result']:
                        break

        if ani['result'] or hb['result'] or mal['result']:
            try:
                titleToAdd = ''
                if mal['result']:
                    titleToAdd = mal['result']['title']
                if hb['result']:
                    titleToAdd = hb['result']['title']
                if ani['result']:
                    titleToAdd = ani['result']['title_romaji']

                # FIX: was `str(...).lower is not 'nihilate'` — that compares
                # the bound `lower` method to a string, which is always True,
                # so the nihilate/roboragi tracking exclusion never worked.
                subredditName = str(baseComment.subreddit).lower()
                if (subredditName != 'nihilate' and subredditName != 'roboragi'
                        and not blockTracking):
                    DatabaseHandler.addRequest(titleToAdd, 'Anime',
                                               baseComment.author.name,
                                               baseComment.subreddit)
            except Exception:
                traceback.print_exc()

        return CommentBuilder.buildAnimeComment(isExpanded, mal['result'],
                                                hb['result'], ani['result'],
                                                ap['result'], adb['result'])
    except Exception:
        traceback.print_exc()
        return None
def buildAnimeReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look an anime up across MAL/Anilist (+AniP/AniDB links) and build the
    reply comment.  (Hummingbird support has been removed in this variant;
    the dead commented-out blocks have been deleted.)

    :param searchText: the title as requested by the user.
    :param isExpanded: expanded (detailed) vs. normal reply.
    :param baseComment: the praw comment being answered (used for tracking).
    :param blockTracking: when True, do not record the request in the DB.
    :returns: the built comment dict from CommentBuilder, or None on error.
    """
    try:
        # Each source is a search function + synonym expander + cached result.
        mal = {
            'search_function': MAL.getAnimeDetails,
            'synonym_function': MAL.getSynonyms,
            'checked_synonyms': [],
            'result': None
        }
        ani = {
            'search_function': Anilist.getAnimeDetails,
            'synonym_function': Anilist.getSynonyms,
            'checked_synonyms': [],
            'result': None
        }
        ap = {'search_function': AniP.getAnimeURL, 'result': None}
        adb = {'search_function': AniDB.getAnimeURL, 'result': None}

        # Check the synonym table first: a hit pins each source to a known ID.
        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "Anime" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()
        if alternateLinks:
            synonym = json.loads(alternateLinks[0])
            if synonym:
                malsyn = None
                if 'mal' in synonym and synonym['mal']:
                    malsyn = synonym['mal']
                anisyn = None
                if 'ani' in synonym and synonym['ani']:
                    anisyn = synonym['ani']
                apsyn = None
                if 'ap' in synonym and synonym['ap']:
                    apsyn = synonym['ap']
                adbsyn = None
                if 'adb' in synonym and synonym['adb']:
                    adbsyn = synonym['adb']

                mal['result'] = MAL.getAnimeDetails(malsyn[0], malsyn[1]) if malsyn else None
                ani['result'] = Anilist.getAnimeDetailsById(anisyn) if anisyn else None
                ap['result'] = AniP.getAnimeURLById(apsyn) if apsyn else None
                adb['result'] = AniDB.getAnimeURLById(adbsyn) if adbsyn else None
        else:
            # No synonym entry: search each source, feeding every source's
            # synonym list back into the pool until results stabilise.
            data_sources = [ani, mal]
            aux_sources = [ap, adb]
            synonyms = set([searchText])

            for x in range(len(data_sources)):
                for source in data_sources:
                    if source['result']:
                        break
                    else:
                        for synonym in synonyms:
                            if synonym in source['checked_synonyms']:
                                continue
                            source['result'] = source['search_function'](synonym)
                            source['checked_synonyms'].append(synonym)
                            if source['result']:
                                break
                    if source['result']:
                        synonyms.update([
                            synonym.lower() for synonym in
                            source['synonym_function'](source['result'])
                        ])

            for source in aux_sources:
                for synonym in synonyms:
                    source['result'] = source['search_function'](synonym)
                    if source['result']:
                        break

        if ani['result'] or mal['result']:
            try:
                titleToAdd = ''
                if mal['result']:
                    if 'title' in mal['result']:
                        titleToAdd = mal['result']['title']
                if ani['result']:
                    if 'title_romaji' in ani['result']:
                        titleToAdd = ani['result']['title_romaji']

                # FIX: was `str(...).lower is not 'nihilate'` — that compares
                # the bound `lower` method to a string, which is always True,
                # so the nihilate/roboragi tracking exclusion never worked.
                subredditName = str(baseComment.subreddit).lower()
                if (subredditName != 'nihilate' and subredditName != 'roboragi'
                        and not blockTracking):
                    DatabaseHandler.addRequest(titleToAdd, 'Anime',
                                               baseComment.author.name,
                                               baseComment.subreddit)
            except Exception:
                traceback.print_exc()

        return CommentBuilder.buildAnimeComment(isExpanded, mal['result'],
                                                ani['result'], ap['result'],
                                                adb['result'])
    except Exception:
        traceback.print_exc()
        return None
def buildMangaComment(isExpanded, mal, ani, mu, ap):
    """Merge MAL/Anilist manga results (plus MangaUpdates / Anime-Planet URLs)
    into one Reddit-markup reply.

    :param isExpanded: expanded replies add the Japanese title, bold info
        line, request stats and description.
    :param mal: MAL result dict or None.
    :param ani: Anilist result dict or None (its fields win when both exist).
    :param mu: MangaUpdates URL or None.
    :param ap: Anime-Planet URL or None.
    :returns: {'title': ..., 'comment': ...} or None on any failure.
    """
    try:
        comment = ''

        title = None
        jTitle = None
        cType = None

        malURL = None
        aniURL = None
        muURL = mu
        apURL = ap

        status = None
        chapters = None
        volumes = None
        genres = []

        desc = None

        if mal is not None:
            title = mal['title']
            malURL = 'http://myanimelist.net/manga/' + str(mal['id'])
            desc = mal['synopsis']
            status = mal['status']
            cType = mal['type']
            try:
                # MAL reports 0 chapters for ongoing/unknown series.
                if int(mal['chapters']) == 0:
                    chapters = 'Unknown'
                else:
                    chapters = mal['chapters']
            except Exception:
                chapters = 'Unknown'
            try:
                volumes = mal['volumes']
            except Exception:
                volumes = 'Unknown'

        if ani is not None:
            # Anilist data overrides MAL where present.
            if title is None:
                title = ani['title_english']
            aniURL = 'http://anilist.co/manga/' + str(ani['id'])
            desc = ani['description']
            status = ani['publishing_status'].title()
            cType = ani['type']
            try:
                if ani['title_japanese'] is not None:
                    jTitle = ani['title_japanese']
                if ani['total_chapters'] is not None:
                    if ani['total_chapters'] == 0:
                        chapters = 'Unknown'
                    else:
                        chapters = ani['total_chapters']
                if ani['total_volumes'] is not None:
                    volumes = ani['total_volumes']
                else:
                    volumes = 'Unknown'
                if ani['genres'] is not None:
                    genres = ani['genres']
            except Exception as e:
                print(e)

        # TODO(review): sibling builders pass a type string here (e.g.
        # 'Manga'/'VN'); this one passes True — confirm against
        # DatabaseHandler.getRequestStats before changing.
        stats = DatabaseHandler.getRequestStats(title, True)

        #---------- BUILDING THE COMMENT ----------#

        #----- TITLE -----#
        comment += '**' + title.strip() + '** - ('

        #----- LINKS -----#
        urlComments = []
        if malURL is not None:
            urlComments.append('[MAL](' + malURL + ')')
        if apURL is not None:
            urlComments.append('[A-P](' + apURL + ')')
        if aniURL is not None:
            urlComments.append('[ANI](' + aniURL + ')')
        if muURL is not None:
            urlComments.append('[MU](' + muURL + ')')

        # FIX: was a manual loop guarded by `if i is not 0` (int identity).
        comment += ', '.join(urlComments)
        comment += ')'

        #----- JAPANESE TITLE -----#
        if isExpanded:
            if jTitle is not None:
                comment += '\n\n'
                # ^^word renders each word as small superscript text.
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if i != 0:
                        comment += ' '
                    comment += '^^' + word

        #----- INFO LINE -----#
        # FIX throughout: `str(x) is not 'Unknown'` identity comparisons
        # replaced with `!=`; identity only worked when the string happened
        # to be the interned literal from this module.
        if isExpanded:
            comment += '\n\n^('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if cType != 'Light Novel':
                if str(chapters) != 'Unknown':
                    comment += ' | **Chapters:** ' + str(chapters)
            else:
                comment += ' | **Volumes:** ' + str(volumes)
            if genres:
                comment += ' | **Genres:** '
        else:
            comment += '\n\n^('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += cType + ' | '
            comment += 'Status: ' + status
            if cType != 'Light Novel':
                if str(chapters) != 'Unknown':
                    comment += ' | Chapters: ' + str(chapters)
            else:
                comment += ' | Volumes: ' + str(volumes)
            if genres:
                comment += ' | Genres: '

        if genres:
            for i, genre in enumerate(genres):
                if i != 0:
                    comment += ', '
                comment += genre

        if isExpanded and (stats is not None):
            comment += ' \n**Stats:** ' + str(stats['total']) + ' requests across ' + str(stats['uniqueSubreddits']) + ' subreddit(s)^) ^- ^' + str(round(stats['totalAsPercentage'], 3)) + '% ^of ^all ^requests'
        else:
            comment += ')'

        #----- DESCRIPTION -----#
        if isExpanded:
            comment += '\n\n' + cleanupDescription(desc)

        #----- END -----#
        receipt = '(M) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if ap is not None:
            receipt += 'AP '
        if ani is not None:
            receipt += 'ANI '
        if muURL is not None:
            receipt += 'MU '
        print(receipt)

        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['comment'] = comment
        return dictToReturn
    except Exception:
        # traceback.print_exc()
        return None
def buildLightNovelReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look a light novel up across MAL/Anilist (+NovelUpdates/LNDB links)
    and build the reply comment.

    :param searchText: the title as requested by the user.
    :param isExpanded: expanded (detailed) vs. normal reply.
    :param baseComment: the praw comment being answered (used for tracking).
    :param blockTracking: when True, do not record the request in the DB.
    :returns: the built comment dict from CommentBuilder, or None on error.
    """
    try:
        # Each source is a search function + synonym expander + cached result.
        mal = {
            'search_function': MAL.getLightNovelDetails,
            'synonym_function': MAL.getSynonyms,
            'checked_synonyms': [],
            'result': None
        }
        ani = {
            'search_function': Anilist.getLightNovelDetails,
            'synonym_function': Anilist.getSynonyms,
            'checked_synonyms': [],
            'result': None
        }
        nu = {'search_function': NU.getLightNovelURL, 'result': None}
        lndb = {'search_function': LNDB.getLightNovelURL, 'result': None}

        # Check the synonym table first: a hit pins each source to a known ID.
        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "LN" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()
        if alternateLinks:
            synonym = json.loads(alternateLinks[0])
            if synonym:
                malsyn = None
                if 'mal' in synonym and synonym['mal']:
                    malsyn = synonym['mal']
                anisyn = None
                if 'ani' in synonym and synonym['ani']:
                    anisyn = synonym['ani']
                nusyn = None
                if 'nu' in synonym and synonym['nu']:
                    nusyn = synonym['nu']
                lndbsyn = None
                if 'lndb' in synonym and synonym['lndb']:
                    lndbsyn = synonym['lndb']

                mal['result'] = MAL.getLightNovelDetails(malsyn[0], malsyn[1]) if malsyn else None
                ani['result'] = Anilist.getMangaDetailsById(anisyn) if anisyn else None
                nu['result'] = NU.getLightNovelById(nusyn) if nusyn else None
                lndb['result'] = LNDB.getLightNovelById(lndbsyn) if lndbsyn else None
        else:
            # No synonym entry: search each source, feeding every source's
            # synonym list back into the pool until results stabilise.
            data_sources = [ani, mal]
            aux_sources = [nu, lndb]
            synonyms = set([searchText])

            for x in range(len(data_sources)):
                for source in data_sources:
                    if source['result']:
                        break
                    else:
                        for synonym in synonyms:
                            if synonym in source['checked_synonyms']:
                                continue
                            source['result'] = source['search_function'](synonym)
                            source['checked_synonyms'].append(synonym)
                            if source['result']:
                                break
                    if source['result']:
                        synonyms.update([
                            synonym.lower() for synonym in
                            source['synonym_function'](source['result'])
                        ])

            for source in aux_sources:
                for synonym in synonyms:
                    source['result'] = source['search_function'](synonym)
                    if source['result']:
                        break

        if ani['result'] or mal['result']:
            try:
                titleToAdd = ''
                if mal['result']:
                    titleToAdd = mal['result']['title']
                if ani['result']:
                    try:
                        titleToAdd = ani['result']['title_romaji']
                    except Exception:
                        titleToAdd = ani['result']['title_english']

                # FIX: was `str(...).lower is not 'nihilate'` — that compares
                # the bound `lower` method to a string, which is always True,
                # so the nihilate/roboragi tracking exclusion never worked.
                subredditName = str(baseComment.subreddit).lower()
                if (subredditName != 'nihilate' and subredditName != 'roboragi'
                        and not blockTracking):
                    DatabaseHandler.addRequest(titleToAdd, 'LN',
                                               baseComment.author.name,
                                               baseComment.subreddit)
            except Exception:
                traceback.print_exc()

        return CommentBuilder.buildLightNovelComment(isExpanded, mal['result'],
                                                     ani['result'], nu['result'],
                                                     lndb['result'])
    except Exception:
        traceback.print_exc()
        return None
def buildStatsComment(subreddit=None, username=None):
    """Build a Markdown stats reply.

    Exactly one mode is used, in priority order:
    - username set: that user's request stats;
    - subreddit set: that subreddit's request stats;
    - neither: overall bot-wide stats.

    Returns the reply string, or None if anything raises.
    All figures come from DatabaseHandler; the dict keys read below define
    the expected schema of each stats call.
    """
    try:
        statComment = ''
        receipt = '(S) Request successful: Stats'

        if username:
            userStats = DatabaseHandler.getUserStats(username)

            if userStats:
                statComment += 'Some stats on /u/' + username + ':\n\n'
                statComment += '- **' + str(userStats['totalUserComments']) + '** total comments searched (' + str(round(userStats['totalUserCommentsAsPercentage'], 3)) + '% of all comments)\n'
                statComment += '- **' + str(userStats['totalUserRequests']) + '** requests made (' + str(round(userStats['totalUserRequestsAsPercentage'], 3)) + '% of all requests and #' + str(userStats['overallRequestRank']) + ' overall)\n'
                statComment += '- **' + str(userStats['uniqueRequests']) + '** unique anime/manga requested\n'
                statComment += '- **/r/' + str(userStats['favouriteSubreddit']) + '** is their favourite subreddit with ' + str(userStats['favouriteSubredditCount']) + ' requests (' + str(round(userStats['favouriteSubredditCountAsPercentage'], 3)) + '% of the subreddit\'s requests)\n'
                statComment += '\n'
                statComment += 'Their most frequently requested anime/manga overall are:\n\n'

                # topRequests rows are (name, type, count) tuples.
                for i, request in enumerate(userStats['topRequests']):
                    statComment += str(i + 1) + '. **' + str(request[0]) + '** (' + str(request[1]) + ' - ' + str(request[2]) + ' requests) \n'
            else:
                statComment += '/u/' + str(username) + ' hasn\'t used Roboragi yet.'

            receipt += ' - /u/' + username
        elif subreddit:
            # Accept praw Subreddit objects as well as plain strings.
            subreddit = str(subreddit)
            subredditStats = DatabaseHandler.getSubredditStats(subreddit.lower())

            if subredditStats:
                statComment += '**/r/' + subreddit + ' Stats**\n\n'
                statComment += 'I\'ve searched through ' + str(subredditStats['totalComments'])
                statComment += ' unique comments on /r/' + subreddit
                statComment += ' and fulfilled a total of ' + str(subredditStats['total']) + ' requests, '
                statComment += 'representing ' + str(round(subredditStats['totalAsPercentage'], 2)) + '% of all requests. '
                statComment += 'A total of ' + str(subredditStats['uniqueNames']) + ' unique anime/manga have been requested here, '
                statComment += 'with a mean value of ' + str(round(subredditStats['meanValuePerRequest'], 3)) + ' requests/show'
                statComment += ' and a standard deviation of ' + str(round(subredditStats['standardDeviation'], 3)) + '.'
                statComment += '\n\n'
                statComment += 'The most frequently requested anime/manga on this subreddit are:\n\n'

                for i, request in enumerate(subredditStats['topRequests']):
                    statComment += str(i + 1) + '. **' + str(request[0]) + '** (' + str(request[1]) + ' - ' + str(request[2]) + ' requests)\n'

                statComment += '\n'
                statComment += 'The most frequent requesters on this subreddit are:\n\n'

                # topRequesters rows are (username, count) tuples.
                for i, requester in enumerate(subredditStats['topRequesters']):
                    statComment += str(i + 1) + '. /u/' + str(requester[0]) + ' (' + str(requester[1]) + ' requests)\n'
            else:
                statComment += 'There have been no requests on /r/' + str(subreddit) + ' yet.'

            receipt += ' - /r/' + subreddit
        else:
            basicStats = DatabaseHandler.getBasicStats()

            # The overall stats section
            statComment += '**Overall Stats**\n\n'
            statComment += 'I\'ve searched through ' + str(basicStats['totalComments'])
            statComment += ' unique comments and fulfilled a total of ' + str(basicStats['total'])
            statComment += ' requests across ' + str(basicStats['uniqueSubreddits']) + ' unique subreddit(s). '
            statComment += 'A total of ' + str(basicStats['uniqueNames'])
            statComment += ' unique anime/manga have been requested, with a mean value of ' + str(round(basicStats['meanValuePerRequest'], 3))
            statComment += ' requests/show and a standard deviation of ' + str(round(basicStats['standardDeviation'], 3)) + '.'
            statComment += '\n\n'
            statComment += 'The most frequently requested anime/manga overall are:\n\n'

            for i, request in enumerate(basicStats['topRequests']):
                statComment += str(i + 1) + '. **' + str(request[0]) + '** (' + str(request[1]) + ' - ' + str(request[2]) + ' requests)\n'

            statComment += '\n'
            statComment += 'The most frequent requesters overall are: \n'

            for i, requester in enumerate(basicStats['topRequesters']):
                statComment += str(i + 1) + '. /u/' + str(requester[0]) + ' (' + str(requester[1]) + ' requests) \n'

            receipt += ' - Basic'

        print(receipt)
        return statComment
    except:
        traceback.print_exc()
        return None
def buildMangaReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look a manga up across MAL/Anilist (+MangaUpdates/Anime-Planet links)
    and build the reply comment.

    Basic breakdown:
    - a synonym-table hit pins each source to a known ID;
    - otherwise, if Anilist finds something, use it to find the MAL entry
      (and vice versa), then derive the MU and A-P URLs from whichever hit;
    - on any hit, record the request in the tracking DB.

    :param searchText: the title as requested by the user.
    :param isExpanded: expanded (detailed) vs. normal reply.
    :param baseComment: the praw comment being answered (used for tracking).
    :param blockTracking: when True, do not record the request in the DB.
    :returns: the built comment dict from CommentBuilder, or None on error.
    """
    try:
        ani = None
        mal = None
        mu = None
        ap = None

        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "Manga" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()
        if alternateLinks:
            synonym = json.loads(alternateLinks[0])
            if 'mal' in synonym:
                if synonym['mal']:
                    mal = MAL.getMangaDetails(synonym['mal'][0], synonym['mal'][1])
            if 'ani' in synonym:
                if synonym['ani']:
                    ani = Anilist.getMangaDetailsById(synonym['ani'])
            if 'mu' in synonym:
                if synonym['mu']:
                    mu = MU.getMangaURLById(synonym['mu'])
            if 'ap' in synonym:
                if synonym['ap']:
                    ap = AniP.getMangaURLById(synonym['ap'])
        else:
            ani = Anilist.getMangaDetails(searchText)
            if ani:
                # Use the Anilist titles to locate the MAL entry.
                try:
                    mal = MAL.getMangaDetails(ani['title_romaji'])
                except Exception:
                    pass
                if not mal:
                    try:
                        mal = MAL.getMangaDetails(ani['title_english'])
                    except Exception:
                        pass
                if not mal:
                    mal = MAL.getMangaDetails(searchText)
            else:
                mal = MAL.getMangaDetails(searchText)
                if mal:
                    ani = Anilist.getMangaDetails(mal['title'])

        #----- Finally... -----#
        if ani or mal:
            try:
                titleToAdd = ''
                if mal:
                    titleToAdd = mal['title']
                else:
                    try:
                        titleToAdd = ani['title_english']
                    except Exception:
                        titleToAdd = ani['title_romaji']

                # Synonym-table hits already carry their MU/A-P links.
                if not alternateLinks:
                    # MU stuff
                    if mal:
                        mu = MU.getMangaURL(mal['title'])
                    else:
                        mu = MU.getMangaURL(ani['title_romaji'])

                    # Do the anime-planet stuff: try every known title and
                    # synonym until one resolves.
                    if mal and not ap:
                        if mal['title'] and not ap:
                            ap = AniP.getMangaURL(mal['title'])
                        if mal['english'] and not ap:
                            ap = AniP.getMangaURL(mal['english'])
                        if mal['synonyms'] and not ap:
                            for synonym in mal['synonyms']:
                                if ap:
                                    break
                                ap = AniP.getMangaURL(synonym)
                    if ani and not ap:
                        if ani['title_english'] and not ap:
                            ap = AniP.getMangaURL(ani['title_english'])
                        if ani['title_romaji'] and not ap:
                            ap = AniP.getMangaURL(ani['title_romaji'])
                        if ani['synonyms'] and not ap:
                            for synonym in ani['synonyms']:
                                if ap:
                                    break
                                ap = AniP.getMangaURL(synonym)

                # FIX: was `str(...).lower is not 'nihilate'` — that compares
                # the bound `lower` method to a string, which is always True,
                # so the nihilate/roboragi tracking exclusion never worked.
                subredditName = str(baseComment.subreddit).lower()
                if (subredditName != 'nihilate' and subredditName != 'roboragi'
                        and not blockTracking):
                    DatabaseHandler.addRequest(titleToAdd, 'Manga',
                                               baseComment.author.name,
                                               baseComment.subreddit)
            except Exception:
                traceback.print_exc()

        return CommentBuilder.buildMangaComment(isExpanded, mal, ani, mu, ap)
    except Exception:
        traceback.print_exc()
        return None
def buildMangaEmbed(isExpanded, mal, ani, mu, ap):
    """Assemble a Discord embed dict for a manga from the gathered sources.

    mal/ani are detail dicts (or None) from MAL/Anilist; mu/ap are plain
    URLs (or None). Anilist values override MAL ones where present.

    Returns {'title': ..., 'embed': ...} or None on failure.
    """
    try:
        comment = ''
        descComment = ''

        title = None
        jTitle = None
        cType = None
        malimage = ''
        malURL = None
        aniURL = None
        muURL = mu
        apURL = ap
        status = None
        chapters = None
        volumes = None
        genres = []
        desc = None

        # --- Pull fields from MAL first ---
        if not (mal is None):
            title = mal['title']
            malURL = 'http://myanimelist.net/manga/' + str(mal['id'])
            desc = mal['synopsis']
            status = mal['status']
            malimage = mal['image']
            cType = mal['type']

            # A chapter/volume count of 0 (or unparsable) means "Unknown".
            try:
                if (int(mal['chapters']) == 0):
                    chapters = 'Unknown'
                else:
                    chapters = mal['chapters']
            except:
                chapters = 'Unknown'

            try:
                if (int(mal['volumes']) == 0):
                    volumes = 'Unknown'
                else:
                    volumes = mal['volumes']
            except:
                volumes = 'Unknown'

        # --- Anilist data overrides MAL where available ---
        if ani is not None:
            if title is None:
                title = ani['title_english']
            aniURL = 'http://anilist.co/manga/' + str(ani['id'])
            if ani['description']:
                desc = ani['description']
            try:
                status = ani['publishing_status'].title()
            except:
                pass
            cType = ani['type']
            try:
                if ani['title_japanese'] is not None:
                    jTitle = ani['title_japanese']
                if ani['total_chapters'] is not None:
                    if ani['total_chapters'] == 0:
                        chapters = 'Unknown'
                    else:
                        chapters = ani['total_chapters']
                if ani['total_volumes'] is not None:
                    if ani['total_volumes'] == 0:
                        volumes = 'Unknown'
                    else:
                        volumes = ani['total_volumes']
                if ani['genres'] is not None:
                    genres = ani['genres']
            except Exception as e:
                print(e)

        stats = DatabaseHandler.getRequestStats(title, 'Manga')

        # ---------- BUILDING THE COMMENT ----------#

        # ----- LINKS -----#
        urlComments = []
        allLinks = ''

        if malURL is not None:
            urlComments.append("[MAL]({})".format(
                sanitise_url_for_markdown(malURL)))
        if aniURL is not None:
            urlComments.append("[ANI]({})".format(
                sanitise_url_for_markdown(aniURL)))
        if apURL is not None:
            urlComments.append("[AP]({})".format(
                sanitise_url_for_markdown(apURL)))
        if muURL is not None:
            urlComments.append("[MU]({})".format(
                sanitise_url_for_markdown(muURL)))

        # BUGFIX: original used `i is not 0` (int identity); use != instead.
        for i, link in enumerate(urlComments):
            if i != 0:
                allLinks += ', '
            allLinks += link

        # ----- JAPANESE TITLE -----#
        if (isExpanded):
            if jTitle is not None:
                comment += '\n\n'
                splitJTitle = jTitle.split()
                for i, word in enumerate(splitJTitle):
                    if not (i == 0):
                        comment += ' '
                    comment += word

        # ----- INFO LINE -----#
        # BUGFIX: the Unknown checks below used `str(x) is not 'Unknown'`,
        # which compares object identity and only worked by CPython interning
        # accident; replaced with != throughout.
        if (isExpanded):
            comment += '\n\n('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += '**' + cType + '** | '
            comment += '**Status:** ' + status
            if (cType != 'Light Novel'):
                if str(volumes) != 'Unknown':
                    comment += ' | **Volumes:** ' + str(volumes)
                if str(chapters) != 'Unknown':
                    comment += ' | **Chapters:** ' + str(chapters)
            else:
                if str(volumes) != 'Unknown':
                    comment += ' | **Volumes:** ' + str(volumes)
            if genres:
                comment += ' | **Genres:** '
        else:
            comment += '\n\n('
            if cType:
                if cType == 'Novel':
                    cType = 'Light Novel'
                comment += cType + ' | '
            comment += 'Status: ' + status
            if (cType != 'Light Novel'):
                if str(volumes) != 'Unknown':
                    comment += ' | Volumes: ' + str(volumes)
                if str(chapters) != 'Unknown':
                    comment += ' | Chapters: ' + str(chapters)
            else:
                if str(volumes) != 'Unknown':
                    comment += ' | Volumes: ' + str(volumes)
            if genres:
                comment += ' | Genres: '

        if genres:
            for i, genre in enumerate(genres):
                if i != 0:
                    comment += ', '
                comment += genre

        if (isExpanded) and (stats is not None):
            # NOTE(review): 'server(s))' contains what looks like a stray
            # closing paren; kept byte-identical pending confirmation.
            comment += ') \n\n**Stats:** ' + str(
                stats['total']) + ' requests across ' + str(
                stats['uniqueSubreddits']) + ' server(s)) - ' + str(
                round(stats['totalAsPercentage'], 3)) + '% of all requests'
        else:
            comment += ')'

        # ----- DESCRIPTION -----#
        if (isExpanded):
            descComment += cleanupDescription(desc)

        # ----- END -----#
        receipt = '(M) Request successful: ' + title + ' - '
        if malURL is not None:
            receipt += 'MAL '
        if ap is not None:
            receipt += 'AP '
        if ani is not None:
            receipt += 'AL '
        if muURL is not None:
            receipt += 'MU '
        print(receipt.encode('utf8'))

        # ----- Build embed object -----#
        try:
            embed = buildEmbedObject(
                title, allLinks, comment, malimage, isExpanded, descComment)
        except Exception as e:
            print(e)

        dictToReturn = {}
        dictToReturn['title'] = title
        dictToReturn['embed'] = embed

        return dictToReturn
    except Exception as e:
        print(e)
        return None
async def process_message(message, is_edit=False):
    """Parse a Discord message for {anime}, <manga> and ]LN[ requests and reply.

    Handles the !help / !command / !stats command forms first, then scans the
    message for single-brace (normal) and double-brace (expanded) requests,
    builds replies via DiscordoragiSearch, and posts them either as embeds or
    as a plain text message depending on the bot's embed permission.
    Messages that produced no reply are recorded as seen in the database.
    """
    # Anime/Manga requests that are found go into separate arrays
    animeArray = []
    mangaArray = []
    lnArray = []

    # Check whether the bot is allowed to post embeds in this channel.
    if message.channel.type != discord.ChannelType.private:
        canEmbed = message.channel.server.default_role.permissions.embed_links
    else:
        canEmbed = True
    if not canEmbed:
        botMember = Discord.getMemberFromID(Config.clientid, message.server)
        defaultroleperm = botMember.top_role.permissions
        canEmbed = defaultroleperm.embed_links

    isAdmin = message.author.top_role.permissions.administrator
    isServerMod = message.author.top_role.permissions.manage_server
    isOwner = message.author.id == ownerID

    if message.author.bot:
        return

    # Ignore all "code" markup (anything between backticks) and custom emoji.
    preCleanMessage = re.sub(r"\`(.*?)\`", "", message.clean_content)
    cleanMessage = re.sub(r'<:.+?:([0-9]{15,21})>', "", preCleanMessage)

    messageReply = ''

    # --- !help ---
    if re.search('({!help.*?}|{{!help.*?}}|<!help.*?>|<<!help.*?>>)',
                 cleanMessage, re.S) is not None:
        try:
            localEm = CommentBuilder.buildHelpEmbed()
            await Discord.client.send_message(message.channel, embed=localEm)
            return
        except:
            return

    # --- !command (admin/owner operations) ---
    if re.search(
            '({!command.*?}|{{!command.*?}}|<!command.*?>|<<!command.*?>>)',
            cleanMessage, re.S) is not None:
        if 'toggleexpanded' in cleanMessage.lower() and (isAdmin or isServerMod):
            try:
                allowedStatus = DatabaseHandler.toggleAllowExpanded(
                    message.server.id)
                print("Toggled allowExpanded for server {}".format(
                    message.server.id))
                if allowedStatus.lower() == 'true':
                    await Discord.client.send_message(
                        message.channel, "Expanded requests are now allowed.")
                else:
                    await Discord.client.send_message(
                        message.channel, "Expanded requests are now disallowed.")
                return
            except Exception as e:
                print(e)
                return
        if 'addserver' in cleanMessage.lower() and isOwner:
            try:
                DatabaseHandler.addServerToDatabase(message.server.id)
                await Discord.client.send_message(message.channel,
                                                  "Server has been added.")
                return
            except Exception as e:
                print(e)
                return
        else:
            print("command failed, user probably has insufficient rights")
            return

    sender = re.search('[@]([A-Za-z0-9 _-]+?)(>|}|$)', cleanMessage, re.S)
    mentionArray = message.raw_mentions

    # --- !stats with a mentioned user ---
    if re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
                 cleanMessage, re.S) is not None and sender is not None:
        for mention in mentionArray:
            if not canEmbed:
                messageReply = CommentBuilder.buildStatsComment(
                    server=message.server, username=mention)
            else:
                localEm = CommentBuilder.buildStatsEmbed(
                    server=message.server, username=mention)
                await Discord.client.send_message(message.channel,
                                                  embed=localEm)
        return None
    # --- !sstats (server stats) / bare !stats ---
    if re.search('({!sstats}|{{!sstats}}|<!sstats>|<<!sstats>>)',
                 cleanMessage, re.S) is not None:
        if not canEmbed:
            messageReply = CommentBuilder.buildStatsComment(
                server=message.server)
        else:
            localEm = CommentBuilder.buildStatsEmbed(server=message.server)
            await Discord.client.send_message(message.channel, embed=localEm)
        return None
    elif re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
                   cleanMessage, re.S) is not None:
        if not canEmbed:
            messageReply = CommentBuilder.buildStatsComment()
        else:
            localEm = CommentBuilder.buildStatsEmbed()
            await Discord.client.send_message(message.channel, embed=localEm)
        return None
    else:
        # The basic algorithm here is:
        # If it's an expanded request, build a reply using the data in the
        # braces, clear the arrays, add the reply to the relevant array and
        # ignore everything else.
        # If it's a normal request, build a reply using the data in the
        # braces, add the reply to the relevant array.

        # Counts the number of expanded results vs total results. If it's not
        # just a single expanded result, they all get turned into normal requests.
        numOfRequest = 0
        numOfExpandedRequest = 0
        forceNormal = False

        expandedAllowed = DatabaseHandler.checkServerConfig(
            'allowexpanded', message.server.id)
        if expandedAllowed == False:
            forceNormal = True

        for match in re.finditer(r"\{{2}([^}]*)\}{2}|\<{2}([^>]*)\>{2}",
                                 cleanMessage, re.S):
            numOfRequest += 1
            numOfExpandedRequest += 1
            print("Request found: {}".format(match.group(0)))

        for match in re.finditer(
                r"(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))|(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))",
                cleanMessage, re.S):
            numOfRequest += 1
            print("Request found: {}".format(match.group(0)))

        if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
            forceNormal = True

        # --- Expanded Anime ---
        for match in re.finditer(r"\{{2}([^}]*)\}{2}", cleanMessage, re.S):
            reply = ''
            if match.group(1) != '':
                if (forceNormal) or (str(message.channel).lower() in disableexpanded):
                    reply = await DiscordoragiSearch.buildAnimeReply(
                        match.group(1), message, False, canEmbed)
                else:
                    reply = await DiscordoragiSearch.buildAnimeReply(
                        match.group(1), message, True, canEmbed)
                if (reply is not None):
                    animeArray.append(reply)
            else:
                print("Empty request, ignoring")

        # --- Normal Anime ---
        for match in re.finditer(r"(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))",
                                 cleanMessage, re.S):
            if match.group(1) != '':
                reply = await DiscordoragiSearch.buildAnimeReply(
                    match.group(1), message, False, canEmbed)
                if (reply is not None):
                    animeArray.append(reply)
                else:
                    print('Could not find anime')
            else:
                print("Empty request, ignoring")

        # --- Expanded Manga ---
        for match in re.finditer(r"\<{2}([^>]*)\>{2}(?!(:|\>))",
                                 cleanMessage, re.S):
            if match.group(1) != '':
                reply = ''
                if (forceNormal) or (str(message.channel).lower() in disableexpanded):
                    reply = await DiscordoragiSearch.buildMangaReply(
                        match.group(1), message, False, canEmbed)
                else:
                    reply = await DiscordoragiSearch.buildMangaReply(
                        match.group(1), message, True, canEmbed)
                if (reply is not None):
                    mangaArray.append(reply)
            else:
                print("Empty request, ignoring")

        # --- Expanded Manga with author (<<title>>:(author)) ---
        for match in re.finditer(r"\<{2}([^>]*)\>{2}:\(([^)]+)\)",
                                 cleanMessage, re.S):
            if match.group(1) != '':
                reply = ''
                if (forceNormal) or (str(message.server).lower() in disableexpanded):
                    reply = await DiscordoragiSearch.buildMangaReplyWithAuthor(
                        match.group(1), match.group(2), message, False, canEmbed)
                else:
                    reply = await DiscordoragiSearch.buildMangaReplyWithAuthor(
                        match.group(1), match.group(2), message, True, canEmbed)
                if (reply is not None):
                    mangaArray.append(reply)
            else:
                print("Empty request, ignoring")

        # --- Normal Manga ---
        for match in re.finditer(r"(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))",
                                 cleanMessage, re.S):
            if match.group(1) != '':
                reply = await DiscordoragiSearch.buildMangaReply(
                    match.group(1), message, False, canEmbed)
                if (reply is not None):
                    mangaArray.append(reply)
            else:
                print("Empty request, ignoring")

        # --- Normal Manga with author ---
        for match in re.finditer(r"(?<=(?<!\<)\<)([^\<\>]*)\>:\(([^)]+)\)",
                                 cleanMessage, re.S):
            reply = await DiscordoragiSearch.buildMangaReplyWithAuthor(
                match.group(1), match.group(2), message, False, canEmbed)
            if (reply is not None):
                mangaArray.append(reply)

        # --- Expanded LN ---
        for match in re.finditer(r"\]{2}([^]]*)\[{2}", cleanMessage, re.S):
            if match.group(1) != '':
                reply = ''
                if (forceNormal) or (str(message.server).lower() in disableexpanded):
                    reply = await DiscordoragiSearch.buildLightNovelReply(
                        match.group(1), False, message, canEmbed)
                else:
                    reply = await DiscordoragiSearch.buildLightNovelReply(
                        match.group(1), True, message, canEmbed)
                if (reply is not None):
                    lnArray.append(reply)
            else:
                print("Empty request, ignoring")

        # --- Normal LN ---
        for match in re.finditer(r"(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))",
                                 cleanMessage, re.S):
            if match.group(1) != '':
                reply = await DiscordoragiSearch.buildLightNovelReply(
                    match.group(1), False, message, canEmbed)
                if (reply is not None):
                    lnArray.append(reply)
            else:
                print("Empty request, ignoring")

        # --- Assemble the final reply, deduplicating repeated titles ---
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []
        messageReply = ''

        for i, animeReply in enumerate(animeArray):
            if i != 0:
                messageReply += '\n\n'
            if not (animeReply['title'] in postedAnimeTitles):
                postedAnimeTitles.append(animeReply['title'])
                if not canEmbed:
                    messageReply += animeReply['comment']
                else:
                    messageReply = 'n/a'

        if mangaArray:
            messageReply += '\n\n'

        for i, mangaReply in enumerate(mangaArray):
            if i != 0:
                messageReply += '\n\n'
            if not (mangaReply['title'] in postedMangaTitles):
                postedMangaTitles.append(mangaReply['title'])
                if not canEmbed:
                    messageReply += mangaReply['comment']
                else:
                    messageReply = 'n/a'

        if lnArray:
            messageReply += '\n\n'

        for i, lnReply in enumerate(lnArray):
            if i != 0:
                # BUGFIX: original wrote `commentReply += '\n\n'`, a NameError
                # (no such variable); the accumulator is messageReply.
                messageReply += '\n\n'
            if not (lnReply['title'] in postedLNTitles):
                postedLNTitles.append(lnReply['title'])
                if not canEmbed:
                    messageReply += lnReply['comment']
                else:
                    messageReply = 'N/A'

        # If there are 10+ requests, strip the superscript notes to shorten them.
        # (BUGFIX: `messageReply is ''` identity checks replaced with != / ==.)
        if messageReply != '' and (len(animeArray) + len(mangaArray) >= 10):
            messageReply = re.sub(r"\^\((.*?)\)", "", messageReply, flags=re.M)

        if messageReply != '':
            if is_edit:
                if not canEmbed:
                    await Discord.client.send_message(message.channel,
                                                      messageReply)
                else:
                    for animeReply in animeArray:
                        await Discord.client.send_message(
                            message.channel, embed=animeReply['embed'])
                    for mangaReply in mangaArray:
                        await Discord.client.send_message(
                            message.channel, embed=mangaReply['embed'])
                    for lnReply in lnArray:
                        await Discord.client.send_message(
                            message.channel, embed=lnReply['embed'])
            else:
                try:
                    print("Message created.\n")
                    if not canEmbed:
                        await Discord.client.send_message(message.channel,
                                                          messageReply)
                    else:
                        for animeReply in animeArray:
                            await Discord.client.send_message(
                                message.channel, embed=animeReply['embed'])
                        for mangaReply in mangaArray:
                            await Discord.client.send_message(
                                message.channel, embed=mangaReply['embed'])
                        for lnReply in lnArray:
                            await Discord.client.send_message(
                                message.channel, embed=lnReply['embed'])
                except discord.errors.Forbidden:
                    print('Request from banned channel: '
                          + str(message.channel) + '\n')
                except Exception as e:
                    print(e)
                    traceback.print_exc()
                except:
                    traceback.print_exc()
        else:
            # Nothing found: mark the message as seen so we don't reprocess it.
            try:
                if is_edit:
                    return None
                else:
                    DatabaseHandler.addMessage(message.id, message.author.id,
                                               message.server.id, False)
            except:
                traceback.print_exc()
def buildAnimeReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look up an anime by name and build a reply comment for it.

    Checks the synonym DB first; otherwise cross-resolves between Anilist,
    Hummingbird and MAL, then derives an Anime-Planet URL. Adult entries are
    suppressed. Requests are tracked unless blockTracking is set or the
    request came from an excluded subreddit.

    Returns the comment from CommentBuilder.buildAnimeComment, or None on
    any unexpected failure.
    """
    try:
        mal = None
        hb = None
        ani = None
        ap = None

        # Check the synonym database for manually-mapped IDs first.
        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "Anime" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()

        if (alternateLinks):
            synonym = json.loads(alternateLinks[0])
            if (synonym['mal']):
                mal = MAL.getAnimeDetails(synonym['mal'])
            if (synonym['hb']):
                hb = Hummingbird.getAnimeDetails(synonym['hb'])
            if (synonym['ani']):
                ani = Anilist.getAnimeDetails(synonym['ani'])
            if (synonym['ap']):
                ap = AniP.getAnimeURL(synonym['ap'])
        else:
            # Basic breakdown:
            # If Anilist finds something, use it to find the HB version.
            # If we can't find it, try with HB and use it to "refind" Anilist.
            # If we hit HB, we don't need to look for MAL separately since HB
            # carries the MAL ID. If we don't hit HB, find MAL on its own.
            # If, at the end, we have something from Anilist, get the full
            # Anilist data. If it hits anything, track the request.
            ani = Anilist.getAnimeDetails(searchText)

            if (ani is not None):
                hb = Hummingbird.getAnimeDetails(ani['title_romaji'])
                if (hb is None):
                    for synonym in ani['synonyms']:
                        hb = Hummingbird.getAnimeDetails(synonym)
                        if hb is not None:
                            break
                    # BUGFIX: the original fell through to the English title
                    # unconditionally, clobbering a hit found via synonyms;
                    # only fall back if we still have nothing.
                    if hb is None:
                        hb = Hummingbird.getAnimeDetails(ani['title_english'])
            else:
                hb = Hummingbird.getAnimeDetails(searchText)
                if (hb is not None):
                    ani = Anilist.getAnimeDetails(hb['title'])

            # Doing MAL stuff
            if not mal:
                if hb:
                    mal = MAL.getAnimeDetails(hb['title'])
                    if not mal and hb['alternate_title']:
                        mal = MAL.getAnimeDetails(hb['alternate_title'])
                if ani and not mal:
                    mal = MAL.getAnimeDetails(ani['title_romaji'])
                    if not mal:
                        mal = MAL.getAnimeDetails(ani['title_english'])
                    if not mal and ani['synonyms']:
                        for synonym in ani['synonyms']:
                            if mal:
                                break
                            mal = MAL.getAnimeDetails(synonym)
                if not mal:
                    mal = MAL.getAnimeDetails(searchText)
                if mal and not hb:
                    hb = Hummingbird.getAnimeDetails(mal['title'])
                    if not hb:
                        hb = Hummingbird.getAnimeDetails(mal['english'])
                if mal and not ani:
                    ani = Anilist.getAnimeDetails(mal['title'])
                    if not ani:
                        ani = Anilist.getAnimeDetails(mal['english'])

        # ----- Finally... -----#
        # Upgrade the basic Anilist record to the full one if possible.
        try:
            if ani is not None:
                aniFull = Anilist.getFullAnimeDetails(ani['id'])
                if aniFull is not None:
                    ani = aniFull
        except:
            pass

        if (ani is not None) or (hb is not None) or (mal is not None):
            try:
                titleToAdd = ''
                if mal:
                    titleToAdd = mal['title']
                if hb:
                    titleToAdd = hb['title']
                if ani:
                    titleToAdd = ani['title_romaji']

                # Do Anime-Planet stuff, trying each known title in turn.
                if mal and not ap:
                    if mal['title'] and not ap:
                        ap = AniP.getAnimeURL(mal['title'])
                    if mal['english'] and not ap:
                        ap = AniP.getAnimeURL(mal['english'])
                    if mal['synonyms'] and not ap:
                        for synonym in mal['synonyms']:
                            if ap:
                                break
                            ap = AniP.getAnimeURL(synonym)
                if hb and not ap:
                    if hb['title'] and not ap:
                        ap = AniP.getAnimeURL(hb['title'])
                    if hb['alternate_title'] and not ap:
                        ap = AniP.getAnimeURL(hb['alternate_title'])
                if ani and not ap:
                    if ani['title_english'] and not ap:
                        ap = AniP.getAnimeURL(ani['title_english'])
                    if ani['title_romaji'] and not ap:
                        ap = AniP.getAnimeURL(ani['title_romaji'])
                    if ani['synonyms'] and not ap:
                        for synonym in ani['synonyms']:
                            if ap:
                                break
                            ap = AniP.getAnimeURL(synonym)

                # BUGFIX: `str(...).lower is not 'nihilate'` compared a bound
                # method by identity (always True); call lower() and use !=.
                if (str(baseComment.subreddit).lower() != 'nihilate') and (
                        str(baseComment.subreddit).lower() != 'roboragi') and not blockTracking:
                    DatabaseHandler.addRequest(
                        titleToAdd, 'Anime', baseComment.author.name, baseComment.subreddit)
            except:
                traceback.print_exc()

        # Never surface adult entries.
        if ani is not None:
            if ani['adult'] is True:
                print("NSFW ENTRY")
                mal = None
                hb = None
                ani = None
                ap = None

        return CommentBuilder.buildAnimeComment(isExpanded, mal, hb, ani, ap)

    except Exception as e:
        traceback.print_exc()
        return None
def buildLightNovelReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look up a light novel by name and build a reply comment for it.

    Each source is a small dict bundling its search/synonym functions with
    its result. The synonym DB is consulted first; otherwise MAL and Anilist
    are searched repeatedly, each hit feeding new synonyms into the pool,
    then the auxiliary sources (NovelUpdates, LNDB) are searched with the
    accumulated synonyms. Requests are tracked unless blockTracking is set
    or the request came from an excluded subreddit.

    Returns the comment from CommentBuilder.buildLightNovelComment, or None
    on any unexpected failure.
    """
    try:
        mal = {'search_function': MAL.getLightNovelDetails,
               'synonym_function': MAL.getSynonyms,
               'checked_synonyms': [],
               'result': None}
        ani = {'search_function': Anilist.getLightNovelDetails,
               'synonym_function': Anilist.getSynonyms,
               'checked_synonyms': [],
               'result': None}
        nu = {'search_function': NU.getLightNovelURL,
              'result': None}
        lndb = {'search_function': LNDB.getLightNovelURL,
                'result': None}

        # Check the synonym database for manually-mapped IDs first.
        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "LN" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()

        if (alternateLinks):
            synonym = json.loads(alternateLinks[0])

            if synonym:
                malsyn = None
                if 'mal' in synonym and synonym['mal']:
                    malsyn = synonym['mal']

                anisyn = None
                if 'ani' in synonym and synonym['ani']:
                    anisyn = synonym['ani']

                nusyn = None
                if 'nu' in synonym and synonym['nu']:
                    nusyn = synonym['nu']

                lndbsyn = None
                if 'lndb' in synonym and synonym['lndb']:
                    lndbsyn = synonym['lndb']

                mal['result'] = MAL.getLightNovelDetails(malsyn[0], malsyn[1]) if malsyn else None
                ani['result'] = Anilist.getMangaDetailsById(anisyn) if anisyn else None
                nu['result'] = NU.getLightNovelById(nusyn) if nusyn else None
                lndb['result'] = LNDB.getLightNovelById(lndbsyn) if lndbsyn else None
        else:
            data_sources = [ani, mal]
            aux_sources = [nu, lndb]
            synonyms = set([searchText])

            # Run as many passes as there are data sources so synonyms found
            # by one source can be retried against the other.
            for x in range(len(data_sources)):
                for source in data_sources:
                    if source['result']:
                        break
                    else:
                        for synonym in synonyms:
                            if synonym in source['checked_synonyms']:
                                continue
                            source['result'] = source['search_function'](synonym)
                            source['checked_synonyms'].append(synonym)
                            if source['result']:
                                break
                    if source['result']:
                        synonyms.update(
                            [synonym.lower() for synonym in
                             source['synonym_function'](source['result'])])

            # Auxiliary link-only sources just take the first synonym that hits.
            for source in aux_sources:
                for synonym in synonyms:
                    source['result'] = source['search_function'](synonym)
                    if source['result']:
                        break

        if ani['result'] or mal['result']:
            try:
                titleToAdd = ''
                if mal['result']:
                    titleToAdd = mal['result']['title']
                if ani['result']:
                    try:
                        titleToAdd = ani['result']['title_romaji']
                    except:
                        titleToAdd = ani['result']['title_english']

                # BUGFIX: `str(...).lower is not 'nihilate'` compared a bound
                # method by identity (always True); call lower() and use !=.
                if (str(baseComment.subreddit).lower() != 'nihilate') and (
                        str(baseComment.subreddit).lower() != 'roboragi') and not blockTracking:
                    DatabaseHandler.addRequest(
                        titleToAdd, 'LN', baseComment.author.name, baseComment.subreddit)
            except:
                traceback.print_exc()

        return CommentBuilder.buildLightNovelComment(
            isExpanded, mal['result'], ani['result'], nu['result'], lndb['result'])

    except Exception as e:
        traceback.print_exc()
        return None
def buildMangaReply(searchText, isExpanded, baseComment, blockTracking=False):
    """Look up a manga by name and build a reply comment for it.

    NOTE(review): this file contains two definitions of buildMangaReply
    (this one and an earlier variant with per-key synonym handling); in
    Python the later definition wins — confirm which one is intended and
    remove the other.

    Returns the comment from CommentBuilder.buildMangaComment, or None on
    any unexpected failure. Adult entries are suppressed.
    """
    try:
        ani = None
        mal = None
        mu = None
        ap = None

        # Check the synonym database for manually-mapped IDs first.
        try:
            sqlCur.execute(
                'SELECT dbLinks FROM synonyms WHERE type = "Manga" and lower(name) = ?',
                [searchText.lower()])
        except sqlite3.Error as e:
            print(e)

        alternateLinks = sqlCur.fetchone()

        if (alternateLinks):
            synonym = json.loads(alternateLinks[0])

            if (synonym['mal']):
                mal = MAL.getMangaDetails(synonym['mal'])
            if (synonym['ani']):
                ani = Anilist.getMangaDetails(synonym['ani'])
            if (synonym['mu']):
                mu = MU.getMangaURL(synonym['mu'])
            if (synonym['ap']):
                ap = AniP.getMangaURL(synonym['ap'])
        else:
            # Basic breakdown:
            # If Anilist finds something, use it to find the MAL version.
            # If it hits either MAL or Ani, use it to find the MU version.
            # If it hits either, add it to the request-tracking DB.
            ani = Anilist.getMangaDetails(searchText)

            if not (ani is None):
                mal = MAL.getMangaDetails(ani['title_romaji'])
            else:
                mal = MAL.getMangaDetails(searchText)
                if not (mal is None):
                    ani = Anilist.getMangaDetails(mal['title'])

        # ----- Finally... -----#
        if ani or mal:
            try:
                titleToAdd = ''
                if mal:
                    titleToAdd = mal['title']
                else:
                    titleToAdd = ani['title_english']

                if not alternateLinks:
                    # MU stuff
                    if mal:
                        mu = MU.getMangaURL(mal['title'])
                    else:
                        mu = MU.getMangaURL(ani['title_romaji'])

                    # Do the anime-planet stuff, trying each known title in turn.
                    if mal and not ap:
                        if mal['title'] and not ap:
                            ap = AniP.getMangaURL(mal['title'])
                        if mal['english'] and not ap:
                            ap = AniP.getMangaURL(mal['english'])
                        if mal['synonyms'] and not ap:
                            for synonym in mal['synonyms']:
                                if ap:
                                    break
                                ap = AniP.getMangaURL(synonym)
                    if ani and not ap:
                        if ani['title_english'] and not ap:
                            ap = AniP.getMangaURL(ani['title_english'])
                        if ani['title_romaji'] and not ap:
                            ap = AniP.getMangaURL(ani['title_romaji'])
                        if ani['synonyms'] and not ap:
                            for synonym in ani['synonyms']:
                                if ap:
                                    break
                                ap = AniP.getMangaURL(synonym)

                # BUGFIX: `str(...).lower is not 'nihilate'` compared a bound
                # method by identity (always True); call lower() and use !=.
                if (str(baseComment.subreddit).lower() != 'nihilate') and (
                        str(baseComment.subreddit).lower() != 'roboragi') and not blockTracking:
                    DatabaseHandler.addRequest(
                        titleToAdd, 'Manga', baseComment.author.name, baseComment.subreddit)
            except:
                traceback.print_exc()

        # Never surface adult entries.
        if ani is not None:
            if ani['adult'] is True:
                mal = None
                ani = None
                mu = None
                ap = None

        return CommentBuilder.buildMangaComment(isExpanded, mal, ani, mu, ap)

    except Exception as e:
        traceback.print_exc()
        return None
async def getAnimeDetails(searchText, animeId=None):
    """Search MAL's anime XML API and return the closest matching entry.

    A cached entry is returned immediately if the DB says it's still fresh;
    a stale cache entry forces a refetch by its stored id. On network
    failure the session is rebuilt via setup() and the request retried once.

    Returns the matched anime dict or None on failure.
    """
    cachedAnime = DatabaseHandler.checkForMalEntry(
        'malanime', searchText, animeId)
    if cachedAnime is not None:
        if cachedAnime['update']:
            print("found cached anime, needs update in mal")
        else:
            print("found cached anime, doesn't need update in mal")
            return cachedAnime['content']

    cleanSearchText = urllib.parse.quote(searchText)
    try:
        try:
            # `mal` here is the module-level HTTP session used for MAL requests.
            async with mal.get(
                    'https://myanimelist.net/api/anime/search.xml?q='
                    + cleanSearchText.rstrip(), timeout=10) as resp:
                if resp.status != 200:
                    print("Searching for {} failed with error code {}".format(
                        searchText.rstrip(), resp.status))
                request = await resp.text()
        except Exception as e:
            print(e)
            # Rebuild the session and retry once.
            setup()
            try:
                # BUGFIX: the retry used the raw searchText; use the
                # URL-quoted form like the first attempt so multi-word
                # titles survive.
                async with mal.get(
                        'https://myanimelist.net/api/anime/search.xml?q='
                        + cleanSearchText.rstrip(), timeout=10) as resp:
                    request = await resp.text()
            except aiohttp.ClientError as e:
                # BUGFIX: aiohttp.exceptions.RequestException does not exist
                # (that name belongs to `requests`); matching it would raise
                # AttributeError. aiohttp.ClientError is the base client error.
                print(e)

        rawList = ET.fromstring(request)

        animeList = []
        for anime in rawList.findall('./entry'):
            animeID = anime.find('id').text
            title = anime.find('title').text
            title_english = anime.find('english').text
            synonyms = None
            if anime.find('synonyms').text is not None:
                synonyms = anime.find('synonyms').text.split(";")
            episodes = anime.find('episodes').text
            animeType = anime.find('type').text
            status = anime.find('status').text
            start_date = anime.find('start_date').text
            end_date = anime.find('end_date').text
            synopsis = anime.find('synopsis').text
            image = anime.find('image').text

            data = {'id': animeID,
                    'title': title,
                    'english': title_english,
                    'synonyms': synonyms,
                    'episodes': episodes,
                    'type': animeType,
                    'status': status,
                    'start_date': start_date,
                    'end_date': end_date,
                    'synopsis': synopsis,
                    'image': image}
            animeList.append(data)

        # Prefer an explicit id, then the stale cache's id, then fuzzy match.
        if animeId:
            closestAnime = getThingById(animeId, animeList)
        elif cachedAnime and cachedAnime['update']:
            closestAnime = getThingById(cachedAnime['id'], animeList)
        else:
            closestAnime = getClosestAnime(searchText.strip(), animeList)
        return closestAnime

    except Exception as e:
        print("Error finding anime:{} on MAL\nError:{}".format(searchText, e))
        return None