def SearchDbForShow(list_of_shows):
    db = models.connect()
    source = db.query(ScanURL).filter(ScanURL.media_type == "index").first()
    if source:
        browser = LinkRetrieve.source_login(source)
        if browser is None:
            ActionLog.log('%s could not logon' % source.login_page)
        else:
            for show_searcher in [l for l in list_of_shows if not l.retrieved]:
                config = db.query(models.Config).first()
                matching_indexes = []
                for search_text in show_searcher.search_list:
                    # collect all potential links
                    matching_indexes.extend(
                        db.query(LinkIndex)
                        .filter(LinkIndex.link_text.like('%' + search_text + '%'))
                        .filter(LinkIndex.link_text.like('%' + show_searcher.episode_code + '%'))
                        .all())
                if len(matching_indexes) > 0:
                    for match in matching_indexes:
                        tv_response = browser.get(match.link_url)
                        if tv_response.status_code == 200:
                            episode_soup = tv_response.soup
                            episode_links = LinkRetrieve.get_download_links(
                                episode_soup, config, source.domain, config.hd_format)
                            # check to make sure the links are active
                            for l in episode_links:
                                link_response = browser.get(l)
                                if link_response.status_code == 404:
                                    episode_links = None
                                    break
                            if episode_links:
                                LinkRetrieve.process_tv_link(db, config, show_searcher, episode_links)
                                break  # since we got the download, we can break out of the loop
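# Note: each entry in list_of_shows is expected to expose `retrieved`,
# `search_list` (alternate search strings), and `episode_code` (an
# "S01E05"-style tag, judging by the LIKE filters above).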
def search(self, show_search):
    try:
        t = tvdb_api.Tvdb()
        search_results = t.search(show_search)
        ActionLog.log('Search for "%s".' % show_search)
    except Exception as ex:
        search_results = "{error: %s}" % ex
    return json.dumps(search_results)
def update_episode(self):
    status = 'success'
    try:
        data = cherrypy.request.json
        episode_id = data['episodeid']
        change_to_value = data['changeto']
        if change_to_value == 'search':
            new_status = LinkRetrieve.show_search(episode_id, cherrypy.request.db)
            status = 'success' if new_status == 'Retrieved' else 'failed'
        else:
            change_to_value = change_to_value.title()
            e = cherrypy.request.db.query(Episode).filter(Episode.id == episode_id).first()
            e.status = change_to_value
            cherrypy.request.db.commit()
    except Exception as ex:
        ActionLog.log(ex)
        status = 'error'
    return json.dumps(status)
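# The handler above expects a JSON body like {"episodeid": 42, "changeto": "search"}
# (the id is illustrative). "search" triggers a re-search via
# LinkRetrieve.show_search; any other value (e.g. a hypothetical "skipped")
# is title-cased and stored as the episode's status.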
def refresh(self):
    status = 'success'
    try:
        data = cherrypy.request.json
        is_show_refresh = data['isshowrefresh']
        is_scan = data['isscan']
        if is_scan:
            LinkRetrieve.handle_downloads()
        if is_show_refresh:
            Utils.update_all()
    except Exception as ex:
        ActionLog.log(ex)
        status = 'error'
    return json.dumps(status)
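# The handler above expects a JSON body such as
# {"isshowrefresh": true, "isscan": true}; either flag can be sent on its
# own, and the response is the JSON-encoded string "success" or "error".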
def handle_movie(self):
    ar = AjaxResponse('Movie downloading...')
    try:
        data = cherrypy.request.json
        movie_id = data['movieid']
        is_ignore = data['isignore']
        is_cleanup = data['iscleanup']
        config = cherrypy.request.db.query(Config).first()
        if is_cleanup:
            cherrypy.request.db.query(Movie).filter(Movie.status == 'Ignored').delete()
            cherrypy.request.db.commit()
            ActionLog.log("DB cleanup completed")
        else:
            m = cherrypy.request.db.query(Movie).filter(Movie.id == movie_id).first()
            if is_ignore:
                m.status = 'Ignored'
            else:
                jdownloader_string = ''
                for l in m.movieurls.all():
                    jdownloader_string += l.url + ' '
                LinkRetrieve.write_crawljob_file(m.name, config.movies_directory,
                                                 jdownloader_string, config.crawljob_directory)
                ActionLog.log('"%s\'s" .crawljob file created.' % m.name)
                m.status = 'Retrieved'
            cherrypy.request.db.commit()
    except Exception as ex:
        ActionLog.log('error - %s' % ex)
        ar.status = 'error'
        ar.message = str(ex)
    return ar.to_JSON()
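# `LinkRetrieve.write_crawljob_file` is not shown in this file. Below is a
# minimal sketch of what it plausibly does, assuming JDownloader's
# folder-watch .crawljob property format; the exact keys this project writes
# are an assumption, not confirmed by this source.
import os

def write_crawljob_file(package_name, download_directory, link_text, crawljob_directory):
    # link_text is the space-separated URL string built by handle_movie above
    crawljob_path = os.path.join(crawljob_directory, package_name + '.crawljob')
    with open(crawljob_path, 'w') as f:
        f.write('text=%s\n' % link_text)
        f.write('downloadFolder=%s\n' % download_directory)
        f.write('packageName=%s\n' % package_name)
        f.write('autoStart=TRUE\n')
    return crawljob_path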
def update_show(self):
    status = 'success'
    try:
        data = cherrypy.request.json
        show_id = data['showid']
        action = data['action']
        if action == 'refresh':
            Utils.add_episodes(show_id)
        if action == 'remove':
            s = cherrypy.request.db.query(Show).filter(Show.show_id == show_id).first()
            ActionLog.log('"%s" removed.' % s.show_name)
            cherrypy.request.db.delete(s)
            cherrypy.request.db.commit()
    except Exception as ex:
        ActionLog.log(ex)
        status = 'error'
    return json.dumps(status)
def add_show(self):
    status = 'success'
    try:
        data = cherrypy.request.json
        series_id = data['seriesid']
        t = tvdb_api.Tvdb()
        s = t[series_id]
        if cherrypy.request.db.query(Show).filter(Show.show_id == series_id).first() is None:
            # save the new show to the db
            first_aired_date = datetime.strptime(s['firstaired'], "%Y-%m-%d")
            new_show = Show(show_id=series_id,
                            show_name=s['seriesname'],
                            first_aired=first_aired_date,
                            is_active=s.data['status'] == 'Continuing',
                            banner=s['banner'])
            if new_show.banner is None:
                new_show.banner = ''
            # create a folder based on the show name
            new_show.show_directory = '/' + new_show.show_name.replace('.', '').strip()
            phys_directory = cherrypy.request.db.query(Config).first().tv_parent_directory + new_show.show_directory
            if not os.path.exists(phys_directory):
                os.makedirs(phys_directory)
            cherrypy.request.db.add(new_show)
            cherrypy.request.db.commit()
            ActionLog.log('"%s" added.' % new_show.show_name)
            Utils.add_episodes(series_id, t, cherrypy.request.db)
        else:
            status = 'duplicate'
        # http://stackoverflow.com/questions/7753073/jquery-ajax-post-to-django-view
    except Exception as ex:
        ActionLog.log(ex)
        status = 'error'
    return json.dumps(status)
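# The handler above expects a JSON body like {"seriesid": 81189}, where the
# id is a TheTVDB series id (the number is illustrative only); the JSON
# response is one of "success", "duplicate", or "error".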
def perform_action(subreddit, item, condition, matchobj):
    """Performs the action for the condition(s).

    Also delivers the comment (if set) and creates an ActionLog entry.
    """
    global r

    disclaimer = ('\n\n*[I am a bot](http://www.reddit.com/r/AutoModerator/'
                  'comments/q11pu/what_is_automoderator/), and this action '
                  'was performed automatically. Please [contact the '
                  'moderators of this subreddit](http://www.reddit.com/'
                  'message/compose?to=%2Fr%2F'+item.subreddit.display_name+
                  ') if you have any questions or concerns.*')

    # build the comment if multiple conditions were matched
    if isinstance(condition, list):
        comment = ''
        if any([c.comment for c in condition]):
            for c in condition:
                if c.comment:
                    comment += '* '+c.comment+'\n'

        # bit of a hack and only logs and uses attributes from first
        # condition matched, should find a better method
        condition = condition[0]
        match = matchobj[0]
    else:
        comment = condition.comment
        match = matchobj

    # abort if it's an alert/report/removal/set_flair that's already triggered
    if condition.action in ['alert', 'report', 'remove', 'set_flair']:
        try:
            session.query(ActionLog).filter(
                and_(ActionLog.permalink == get_permalink(item),
                     ActionLog.matched_condition == condition.id)).one()
            return
        except NoResultFound:
            pass

    # perform replacements with match groups
    comment = replace_placeholders(comment, match)
    flair_text = replace_placeholders(condition.set_flair_text, match)
    flair_class = replace_placeholders(condition.set_flair_class, match)

    # perform the action
    if condition.action == 'remove':
        item.remove(condition.spam)
    elif condition.action == 'approve':
        item.approve()
    elif condition.action == 'set_flair':
        item.set_flair(flair_text, flair_class)
    elif condition.action == 'report':
        item.report()
    log_request(condition.action)

    if comment:
        # put together the comment parts for "public" comments
        if condition.comment_method in ['comment', 'message']:
            if subreddit.comment_header:
                comment = subreddit.comment_header+'\n\n'+comment
            if subreddit.comment_footer:
                comment = comment+'\n\n'+subreddit.comment_footer
            comment += disclaimer

        # deliver the comment
        if condition.comment_method == 'comment':
            post_comment(item, comment)
            log_request('comment')
            log_request('distinguish')
        elif condition.comment_method == 'modmail':
            permalink = get_permalink(item)
            if isinstance(item, praw.objects.Comment):
                permalink += '?context=5'
            r.send_message('/r/'+subreddit.name,
                           'AutoModerator condition matched',
                           permalink+'\n\n'+comment)
            log_request('modmail')
        elif condition.comment_method == 'message':
            if item.author:
                r.send_message(item.author.name,
                               'AutoModerator condition matched',
                               get_permalink(item)+'\n\n'+comment)
                log_request('message')

    # log the action taken
    action_log = ActionLog()
    action_log.subreddit_id = subreddit.id
    if item.author:
        action_log.user = item.author.name
    action_log.permalink = get_permalink(item)
    action_log.created_utc = datetime.utcfromtimestamp(item.created_utc)
    action_log.action_time = datetime.utcnow()
    action_log.action = condition.action
    action_log.matched_condition = condition.id

    if isinstance(item, praw.objects.Submission):
        action_log.title = item.title
        action_log.url = item.url
        action_log.domain = item.domain
        logging.info(' /r/%s: %s submission "%s"',
                     subreddit.name,
                     condition.action,
                     item.title.encode('ascii', 'ignore'))
    elif isinstance(item, praw.objects.Comment):
        if item.author:
            logging.info(' /r/%s: %s comment by user %s',
                         subreddit.name,
                         condition.action,
                         item.author.name)
        else:
            logging.info(' /r/%s: %s comment by deleted user',
                         subreddit.name,
                         condition.action)

    session.add(action_log)
    session.commit()
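# `replace_placeholders` is called above but not defined in this file. A
# minimal sketch of what it might do, assuming placeholders of the form
# {{match-N}} filled from the regex match's groups (the placeholder syntax
# here is an assumption, not confirmed by this source):
def replace_placeholders(text, match):
    # text may be None (e.g. set_flair_text unset); match may be None too
    if not text or match is None:
        return text
    for i, group in enumerate(match.groups(), start=1):
        text = text.replace('{{match-%d}}' % i, group or '')
    return text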
def perform_action(subreddit, item, condition):
    """Performs the action for the condition(s) and creates an ActionLog entry."""
    global r

    # post the comment if one is set
    if isinstance(condition, list):
        if any([c.comment for c in condition]):
            comment = ('This has been '+condition[0].action+'d for the '
                       'following reasons:\n\n')
            for c in condition:
                if c.comment:
                    comment += '* '+c.comment+'\n'
            post_comment(item, comment)
        # bit of a hack and only logs first action matched
        # should find a better method
        condition = condition[0]
    elif condition.comment:
        post_comment(item, condition.comment)

    # perform the action
    if condition.action == 'remove':
        item.remove()
    elif condition.action == 'approve':
        item.approve()
    elif condition.action == 'alert':
        r.compose_message(
            '#'+subreddit.name,
            'Reported Item Alert',
            'The following item has received a large number of reports, '+
            'please investigate:\n\n'+item)

    # log the action taken
    action_log = ActionLog()
    action_log.subreddit_id = subreddit.id
    action_log.action_time = datetime.utcnow()
    action_log.action = condition.action

    if isinstance(item, str) or isinstance(item, unicode):
        # for a report-threshold alert, we only know the item's permalink
        action_log.permalink = item
    else:
        action_log.user = item.author.name
        action_log.created_utc = datetime.utcfromtimestamp(item.created_utc)
        action_log.matched_condition = condition.id

    if isinstance(item, reddit.objects.Submission):
        action_log.title = item.title
        action_log.permalink = item.permalink
        action_log.url = item.url
        action_log.domain = item.domain
        logging.info(' /r/%s: %sd submission "%s"',
                     subreddit.name,
                     condition.action,
                     item.title.encode('ascii', 'ignore'))
    elif isinstance(item, reddit.objects.Comment):
        action_log.permalink = ('http://www.reddit.com/r/'+
                                item.subreddit.display_name+
                                '/comments/'+item.link_id.split('_')[1]+
                                '/a/'+item.id)
        logging.info(' %sd comment by user %s',
                     condition.action,
                     item.author.name)

    db.session.add(action_log)
    db.session.commit()
def perform_action(subreddit, item, condition):
    """Performs the action for the condition(s).

    Also delivers the comment (if set) and creates an ActionLog entry.
    """
    global r

    comment = None  # ensure defined even when no matched condition has a comment
    disclaimer = ('\n\n*I am a bot, and this action was performed '
                  'automatically. Please [contact the moderators of this '
                  'subreddit](http://www.reddit.com/message/compose?'
                  'to=%23'+item.subreddit.display_name+') if you have any '
                  'questions or concerns.*')

    # build the comment if multiple conditions were matched
    if isinstance(condition, list):
        if any([c.comment for c in condition]):
            if condition[0].action == 'alert':
                verb = 'alerted'
            else:
                verb = condition[0].action+'d'
            comment = ('This has been '+verb+' for the following reasons:\n\n')
            for c in condition:
                if c.comment:
                    comment += '* '+c.comment+'\n'
            post_comment(item, comment)
        # bit of a hack and only logs and uses attributes from first
        # condition matched, should find a better method
        condition = condition[0]
    else:
        comment = condition.comment

    # abort if it's an alert and we've already alerted on this item
    if condition.action == 'alert':
        try:
            ActionLog.query.filter(
                and_(ActionLog.permalink == get_permalink(item),
                     ActionLog.action == 'alert')).one()
            return
        except NoResultFound:
            pass

    # perform the action
    if condition.action == 'remove':
        item.remove(condition.spam)
    elif condition.action == 'approve':
        item.approve()
    elif condition.action == 'set_flair':
        item.set_flair(condition.set_flair_text, condition.set_flair_class)

    # deliver the comment if set
    if comment:
        if condition.comment_method == 'comment':
            post_comment(item, comment+disclaimer)
        elif condition.comment_method == 'modmail':
            r.compose_message('#'+subreddit.name,
                              'AutoModerator condition matched',
                              get_permalink(item)+'\n\n'+comment)
        elif condition.comment_method == 'message':
            r.compose_message(item.author.name,
                              'AutoModerator condition matched',
                              get_permalink(item)+'\n\n'+comment+disclaimer)

    # log the action taken
    action_log = ActionLog()
    action_log.subreddit_id = subreddit.id
    action_log.user = item.author.name
    action_log.permalink = get_permalink(item)
    action_log.created_utc = datetime.utcfromtimestamp(item.created_utc)
    action_log.action_time = datetime.utcnow()
    action_log.action = condition.action
    action_log.matched_condition = condition.id

    if isinstance(item, reddit.objects.Submission):
        action_log.title = item.title
        action_log.url = item.url
        action_log.domain = item.domain
        logging.info(' /r/%s: %s submission "%s"',
                     subreddit.name,
                     condition.action,
                     item.title.encode('ascii', 'ignore'))
    elif isinstance(item, reddit.objects.Comment):
        logging.info(' /r/%s: %s comment by user %s',
                     subreddit.name,
                     condition.action,
                     item.author.name)

    db.session.add(action_log)
    db.session.commit()
def perform_action(subreddit, item, condition):
    """Performs the action for the condition(s).

    Also delivers the comment (if set) and creates an ActionLog entry.
    """
    global r

    comment = None
    disclaimer = ('\n\n*I am a bot, and this action was performed '
                  'automatically. Please [contact the moderators of this '
                  'subreddit](http://www.reddit.com/message/compose?'
                  'to=%23' + item.subreddit.display_name + ') if you have any '
                  'questions or concerns.*')

    # build the comment if multiple conditions were matched
    if isinstance(condition, list):
        # build short reason string for modporn submission later
        short_reason = ' '.join(['[' + c.short_reason + ']'
                                 for c in condition
                                 if c.short_reason is not None])

        if any([c.comment for c in condition]):
            if condition[0].action == 'alert':
                verb = 'alerted'
            else:
                verb = condition[0].action + 'd'
            comment = ('Unfortunately, your submission has been ' + verb +
                       ' for the following reasons:\n\n')
            for c in condition:
                if c.comment:
                    comment += '* ' + c.comment + '\n'
            comment += ('\nFor more information regarding these issues please '
                        '[see the FAQ](http://www.reddit.com/r/' +
                        item.subreddit.display_name + '/faq), and feel '
                        'free to resubmit once they have been resolved. '
                        'Thank you!')
        # bit of a hack and only logs and uses attributes from first
        # condition matched, should find a better method
        condition = condition[0]
    else:
        short_reason = condition.short_reason
        comment = condition.comment

    # abort if it's an alert and we've already alerted on this item
    if condition.action == 'alert':
        try:
            ActionLog.query.filter(
                and_(ActionLog.permalink == get_permalink(item),
                     ActionLog.action == 'alert')).one()
            return
        except NoResultFound:
            pass

    # perform the action
    if condition.action == 'remove':
        item.remove(condition.spam)
    elif condition.action == 'approve':
        item.approve()
    elif condition.action == 'set_flair':
        item.set_flair(condition.set_flair_text, condition.set_flair_class)

    # deliver the comment if set
    if comment:
        if condition.comment_method == 'comment':
            post_comment(item, comment + disclaimer)
        elif condition.comment_method == 'modmail':
            r.compose_message('#' + subreddit.name,
                              'AutoModerator condition matched',
                              get_permalink(item) + '\n\n' + comment)
        elif condition.comment_method == 'message':
            r.compose_message(
                item.author.name,
                'AutoModerator condition matched',
                get_permalink(item) + '\n\n' + comment + disclaimer)

    # log the action taken
    action_log = ActionLog()
    action_log.subreddit_id = subreddit.id
    action_log.user = item.author.name
    action_log.permalink = get_permalink(item)
    action_log.created_utc = datetime.utcfromtimestamp(item.created_utc)
    action_log.action_time = datetime.utcnow()
    action_log.action = condition.action
    action_log.matched_condition = condition.id

    if isinstance(item, reddit.objects.Submission):
        action_log.title = item.title
        action_log.url = item.url
        action_log.domain = item.domain
        logging.info(' /r/%s: %s submission "%s"',
                     subreddit.name, condition.action, item.title)
    elif isinstance(item, reddit.objects.Comment):
        logging.info(' /r/%s: %s comment by user %s',
                     subreddit.name, condition.action, item.author.name)

    db.session.add(action_log)
    db.session.commit()

    # SFWPorn network moderation requirements - NO LONGER NEEDED
    # if (isinstance(item, reddit.objects.Submission) and
    #         condition.action == 'remove' and
    #         short_reason is not None):
    #     # submit to ModerationPorn
    #     r.submit('ModerationPorn',
    #              '['+subreddit.name+'] '+
    #              '['+item.author.name+'] '+
    #              item.title+
    #              ' '+short_reason,
    #              url=item.permalink)

    if (isinstance(item, reddit.objects.Submission) and
            condition.action == 'remove' and comment):
        # send them a PM as well
        r.compose_message(item.author.name,
                          'SFWPorn submission removed',
                          get_permalink(item) + '\n\n' + comment + disclaimer)
def CreateIndexes(is_full_index=False):
    db = models.connect()

    source = db.query(ScanURL).filter(ScanURL.media_type == "index").first()
    if source:
        browser = source_login(source)
        if browser is None:
            ActionLog.log('%s could not logon' % source.login_page)
        else:
            # this is the first page; increment by 1 for each subsequent page
            i = 1
            is_indexed = False
            duplicates_encountered = 0
            try:
                while not is_indexed:
                    link = source.url
                    if i > 1:
                        link = '%sindex%s.html' % (source.url, i)
                    if i % 100 == 0:
                        # reset the browser session every 100 pages
                        browser = source_login(source)
                    soup = browser.get(link).soup
                    # get all rows, e.g. $("#threadbits_forum_73 tr")
                    all_rows_soup = soup.select(source.link_select)
                    page_adds = 0
                    for row in all_rows_soup:
                        if 'STICKY' in row.text.upper():
                            # skip sticky links
                            continue
                        row_links = row.find_all('a')
                        links_with_text = [t for t in row_links if t.text != '']
                        index_link = links_with_text[0]
                        l = LinkIndex()
                        l.link_text = index_link.text
                        l.link_url = index_link.attrs['href']
                        l.id = int(index_link.attrs['id'].split('_')[2])
                        if l.link_text == '':
                            continue
                        existing_link_index = db.query(LinkIndex).filter(LinkIndex.id == l.id).first()
                        if existing_link_index:
                            duplicates_encountered += 1
                            # more than a full page of duplicates means we've
                            # reached already-indexed content
                            if duplicates_encountered > 35 and not is_full_index:
                                is_indexed = True
                                break
                            else:
                                continue
                        else:
                            page_adds += 1
                            db.add(l)
                    if page_adds > 0:
                        db.commit()
                    if i == 100000:
                        # hard stop so a full index can't loop forever
                        is_indexed = True
                    else:
                        print(i)
                        i += 1
            except Exception as ex:
                print(ex)
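# Usage sketch: the default call does an incremental pass that stops once
# more than 35 already-indexed links are seen; is_full_index=True re-walks
# every page up to the 100000-page hard stop.
if __name__ == '__main__':
    CreateIndexes()                      # incremental index
    # CreateIndexes(is_full_index=True)  # full re-index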