Example #1
    def __init__(self, name):
        # Method of class Pybot (see the Pybot.* references below); assumes
        # module-level imports of os, sys, logging and `from praw import Reddit`.
        dirs = os.listdir(os.getcwd())

        if "redditauth" in dirs:
            with open("redditauth", 'r') as f:
                reddit = dict()
                try:
                    reddit["secret"] = f.readline().strip("\n").lstrip(
                        "secret ")
                    reddit["pass"] = f.readline().strip("\n")[14:]
                    reddit["user"] = f.readline().strip("\n")[9:]
                    print(reddit)
                except Exception:
                    logging.warning("redditauth bad configuration")
                    sys.exit()  # a bare `sys.exit` reference is a no-op

            auth_reddit = Reddit(
                client_id='oy5ifWn5vvoDOg',
                client_secret=reddit["secret"],
                password=reddit["pass"],
                username=reddit["user"],
                user_agent='pyrite',
            )
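            # NOTE: `Reddit` is praw.Reddit; these credentials could equally
            # come from a praw.ini site section instead of the hand-rolled
            # redditauth file.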

        else:
            raise FileNotFoundError("redditauth missing")

        self.name = name

        if "multis" in dirs:
            with open("multis", 'r') as f:
                multi = f.readlines()

            for ele in multi:
                ele = ele.strip("\n")
                Pybot.map_r_mr[ele] = auth_reddit.multireddit(
                    reddit["user"], ele)
                Pybot.arrays[ele] = []
                Pybot.arrays[ele + "top"] = []

        else:
            logging.warning(
                "missing multis file, bot will work without reddit fetching commands"
            )

        self.handlers()
        self.updater.start_polling()
        self.updater.idle()
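The parser above leans on fixed prefix lengths ([14:], [9:]), which breaks silently if the labels in redditauth ever change. A more tolerant sketch, assuming each line is a space-separated "key value" pair (the exact file format is only implied here), splits on the first space instead:

def read_redditauth(path="redditauth"):
    """Parse 'key value' lines into a dict. Hypothetical helper for a
    hypothetical format; adjust the keys to match the actual file."""
    creds = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.rstrip("\n").partition(" ")
            if key and value:
                creds[key] = value
    return creds

With such a helper the magic offsets disappear: creds.get("secret"), creds.get("pass") and creds.get("user") replace the slice arithmetic.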
Example #2
# Assumes: import praw; from unittest.mock import MagicMock; plus the
# downloader project's own RedditTypes and RedditDownloader.
def test_get_multireddits_public(
        test_user: str,
        test_multireddits: list[str],
        limit: int,
        reddit_instance: praw.Reddit,
        downloader_mock: MagicMock,
):
    downloader_mock._determine_sort_function.return_value = praw.models.Subreddit.hot
    downloader_mock.sort_filter = RedditTypes.SortType.HOT
    downloader_mock.args.limit = limit
    downloader_mock.args.multireddit = test_multireddits
    downloader_mock.args.user = test_user
    downloader_mock.reddit_instance = reddit_instance
    downloader_mock._create_filtered_listing_generator.return_value = \
        RedditDownloader._create_filtered_listing_generator(
            downloader_mock,
            reddit_instance.multireddit(test_user, test_multireddits[0]),
        )
    results = RedditDownloader._get_multireddits(downloader_mock)
    results = [sub for res in results for sub in res]
    assert all(isinstance(res, praw.models.Submission) for res in results)
    assert len(results) == limit
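Under the mocks, _get_multireddits ultimately walks a PRAW multireddit listing. A minimal unmocked sketch of that round trip, with placeholder credentials and names, using the same positional multireddit(user, name) call as the test above (newer PRAW versions prefer keyword arguments):

import praw

reddit = praw.Reddit(
    client_id="...",        # placeholders; supply real app credentials
    client_secret="...",
    user_agent="example-agent",
)
multi = reddit.multireddit("someuser", "somemulti")  # placeholder names
submissions = list(multi.hot(limit=10))
assert all(isinstance(s, praw.models.Submission) for s in submissions)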
Example #3
# Assumes module-level: import calendar, csv, time, pystache;
# from os import path; from email.utils import formatdate;
# from praw import Reddit; plus LOGGER, AGENT, CustomDialect,
# HOUR_IN_SECONDS and DAY_IN_SECONDS defined in the same module.
class SubredditStats(object):
    """Contain all the functionality of the subreddit_stats command."""
    def __init__(self, subreddit=None, multireddit=None, days=2):
        """Initialize the SubredditStats instance with config options."""
        self.submissions = []
        self.comments = []
        # keep posts that are at least 2 hours old and newer than <days+1> days
        now_utc = calendar.timegm(time.gmtime())
        self.max_date_thread = now_utc - HOUR_IN_SECONDS * 2
        self.min_date_thread = now_utc - DAY_IN_SECONDS * (days + 1)
        self.min_date = now_utc - DAY_IN_SECONDS * days
        self.reddit = Reddit(check_for_updates=False, user_agent=AGENT)
        if subreddit:
            self.subreddit = self.reddit.subreddit(subreddit)
        elif multireddit:
            self.subreddit = self.reddit.multireddit(*multireddit)
        else:
            raise ValueError('Specify subreddit or multireddit')

    def fetch_recent_submissions(self):
        """Fetch recent submissions in subreddit with boundaries.

        :param max_duration: When set, specifies the number of days to include

        """

        LOGGER.debug('Fetching submissions until %d', self.min_date_thread)
        # /new is newest-first, so stop at the first submission older than
        # the window, and skip ones that are not yet two hours old.
        for submission in self.subreddit.new(limit=None):
            if submission.created_utc <= self.min_date_thread:
                break
            if submission.created_utc > self.max_date_thread:
                continue
            self.submissions.append(submission)

    def fetch_comments(self):
        """Fetch the top comments of each submission."""
        LOGGER.debug('Fetching comments on %d submissions',
                     len(self.submissions))

        for index, submission in enumerate(self.submissions):
            if submission.num_comments == 0:
                continue
            submission.comment_sort = 'top'

            # replace_more() resolves "load more comments" stubs up to its
            # default limit and returns the MoreComments it left unresolved.
            more_comments = submission.comments.replace_more()
            if more_comments:
                skipped_comments = sum(x.count for x in more_comments)
                LOGGER.debug('Skipped %d MoreComments (%d comments)',
                             len(more_comments), skipped_comments)

            LOGGER.debug('Fetched %d comments on %d/%d submissions',
                         len(submission.comments.list()), index + 1,
                         len(self.submissions))
            self.comments.extend(submission.comments.list())

    def process_comments(self, score_limit):
        """Apply filters to comments."""
        LOGGER.debug('Fetched %d comments', len(self.comments))
        self.comments = [
            comment for comment in self.comments if comment.score > score_limit
        ]
        self.comments.sort(key=lambda x: x.score, reverse=True)
        LOGGER.debug('%d comments remain after filtering', len(self.comments))

    def publish_csv(self):
        """Write comments to file."""
        filename = 'comments-%s-%d.csv' % (self.subreddit.display_name,
                                           self.max_date_thread)
        LOGGER.debug('Processing comments to %s', filename)
        with open(filename, 'w', newline='', encoding='utf-8') as filecsv:
            writer = csv.writer(filecsv, dialect=CustomDialect)
            writer.writerow([
                'id', 'score', 'author', 'link', 'created_utc',
                'distinguished', 'gilded', 'body'
            ])
            for comment in self.comments:
                writer.writerow([
                    comment.id, comment.score, comment.author,
                    comment.permalink, comment.created_utc,
                    comment.distinguished, comment.gilded, comment.body
                ])
        return filename
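    # `CustomDialect` used above is defined elsewhere in this module; a
    # plausible definition (an assumption, not the project's actual code)
    # is a csv.Dialect subclass that tweaks delimiter/quoting, e.g.:
    #
    #   class CustomDialect(csv.excel):
    #       delimiter = ';'
    #       quoting = csv.QUOTE_MINIMAL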

    def publish_feed(self):
        """Generate an RSS feed."""
        data = {
            'title': 'Best comments of %s' % self.subreddit.path,
            'url': self.reddit.config.reddit_url + self.subreddit.path,
            'description': 'Best comments from %s' % self.subreddit.path,
            'rss2update': formatdate(),
            'entries': [{
                'title': '[%d] %s on %s' % (comment.score, comment.author,
                                            comment.submission.title),
                'url': self.reddit.config.reddit_url + comment.permalink,
                'text': comment.body_html,
                'author': comment.author,
                'categories': [comment.subreddit.display_name],
                'rss2update': formatdate(comment.created_utc),
            } for comment in self.comments]
        }
        renderer = pystache.Renderer()
        output = renderer.render_path(
            path.join(path.dirname(path.abspath(__file__)), 'rss.mustache'),
            data)
        with open('best-comment.xml', 'w', encoding='utf8') as text_file:
            text_file.write(output)
        return True

    def run(self, action, score_limit):
        """Run the stats pipeline and return the publisher's result."""
        LOGGER.info('Analyzing subreddit: %s', self.subreddit)

        produce_output = None
        if action == 'csv':
            produce_output = self.publish_csv
        elif action == 'feed':
            produce_output = self.publish_feed
        else:
            raise ValueError('Invalid action: %s' % action)

        self.fetch_recent_submissions()
        self.fetch_comments()

        if not self.submissions:
            LOGGER.warning('No submissions were found.')
            return

        self.process_comments(score_limit)

        return produce_output()
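A minimal driver for the class, assuming PRAW credentials are supplied via praw.ini and that the module-level names noted above (LOGGER, AGENT, CustomDialect, the time constants) exist; a sketch, not the project's actual entry point:

if __name__ == '__main__':
    import logging

    logging.basicConfig(level=logging.DEBUG)
    # placeholder subreddit name and thresholds
    stats = SubredditStats(subreddit='learnpython', days=2)
    stats.run('csv', score_limit=10)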