Example no. 1
0
def process_mail(r):
    '''Process unread notifications.

    Replies to each unread summons/PM with the rolled result (or an
    apology when nothing parses), logs questionable or unknown items,
    and marks everything as read.

    :param r: authenticated Reddit session
    :return: True if any item was processed.
    '''
    to_process = [Request(x, r) for x in r.get_unread(unset_has_mail=False)]
    for item in to_process:
        if item.is_summons() or item.is_PM():
            reply_text = item.roll()
            okay = bool(reply_text)
            if not okay:
                reply_text = ("I'm sorry, but I can't find anything"
                              " that I know how to parse.\n\n")
            # NOTE(review): the bot signature is evaluated once and reused
            # (the original called BeepBoop() three times per reply);
            # assumes it is deterministic within a single reply.
            signature = BeepBoop()
            reply_text += signature
            if len(reply_text) > 10000:
                addition = ("\n\n**This reply would exceed 10000 characters"
                            " and has been shortened.  Chaining replies is an"
                            " intended future feature.")
                # Clip with a 200-char safety margin below the 10000 limit,
                # leaving room for the notice and the signature.
                clip_point = 10000 - len(addition) - len(signature) - 200
                reply_text = reply_text[:clip_point] + addition + signature
            item.reply(reply_text)
            lprint("{} resolving request: {}.".format(
                "Successfully" if okay else "Questionably", item))
            if not okay:
                item.log(_log_dir)
        else:
            lprint("Mail is not summons or error.  Logging item.")
            item.log(_log_dir)
        item.origin.mark_as_read()
    return bool(to_process)
Example no. 2
0
    def roll(self):
        """Perform a weighted roll over this table's outcomes.

        Rolls 1..self.die, walks the cumulative item weights to pick the
        matching outcome, and returns a TableRoll.  Returns None if
        anything goes wrong during the roll.
        """
        try:
            weights = [i.weight for i in self.outcomes]
            total_weight = sum(weights)
            if self.die != total_weight:
                # The parsed die size disagrees with the weights; surface
                # the problem in the reply header rather than failing.
                self.header = ("[Table roll error: parsed die did not match"
                               " sum of item weights.]  \n" + self.header)
            c = random.randint(1, self.die)
            # Walk cumulative weights to find which outcome c lands on.
            scan = c
            ind = -1
            while scan > 0:
                ind += 1
                scan -= weights[ind]

            R = TableRoll(d=self.die,
                          rolled=c,
                          head=self.header,
                          out=self.outcomes[ind])
            if len(self.outcomes) != self.die:
                R.error('Expected {} items found {}'.format(
                    self.die, len(self.outcomes)))
            return R
        # TODO: Handle errors more gracefully.
        except Exception as e:
            lprint('Exception in Table roll ({}): {}'.format(self, e))
            return None
Example no. 3
0
def scan_submissions(seen, r, search_word):
    '''Scan the newest subreddit submissions for parseable tables.

    * Get the newest submissions to /r/DnDBehindTheScreen
    * Attempt to parse each item as containing tables
    * (Currently disabled) post a top-level comment requesting that
      table rolls be performed there for readability
    * Update the list of seen submissions and prune it if large

    :param seen: list of already-seen submissions; mutated in place and
        pruned to the last _seen_max_len entries
    :param r: authenticated Reddit session
    :param search_word: optional word to search for within found tables
    :return: True if an organizational comment was posted (always False
        while that feature is disabled).
    '''
    try:
        BtS = r.subreddit('DnDBehindTheScreen')
        new_subs = BtS.new(limit=_fetch_limit)
        # Posting the tidy-reply comment is disabled, so this stays
        # False; kept for the intended future feature.
        saw_something_said_something = False
        for item in new_subs:
            TS = TableSource(item, "scan")
            if not TS.tables:
                continue
            lprint('Found tables, maybe, for submission {}'.format(
                TS.source.url))
            if search_word:
                lprint('Searching found tables for search word {}'.format(
                    search_word))
            matching_table = get_table(TS.tables, search_word)
            if matching_table:
                lprint('Found table for search word {}'.format(search_word))
                lprint(matching_table.for_json())
            # Remember this submission so we do not re-process it.
            # (The original also fetched TS.source.comments into an
            # unused variable for the disabled reply check; dropped to
            # avoid the needless API fetch.)
            if TS.source not in seen:
                seen.append(TS.source)

        # Prune list to max size, mutating the caller's list in place.
        seen[:] = seen[-_seen_max_len:]
        return saw_something_said_something
    except Exception as e:
        lprint("Error during submissions scan: {}".format(e))
        raise
Example no. 4
0
    def _parse(self):
        """Parse self.text into a die size and a list of outcomes.

        On a mid-parse failure the outcomes are replaced with a single
        "N/A" placeholder so callers still get a rollable table.
        """
        # Leading die spec, e.g. "d20", followed by the table body.
        # Raw string: "\d" in a plain literal is an invalid escape.
        top = re.search(r"[dD](\d+)(.*)", self.text)
        if not top:
            return

        self.die = int(top.group(1))
        tail = top.group(2)
        while tail:
            in_match = re.search(_line_regex, tail.strip(_trash))
            if not in_match:
                lprint(
                    "Could not complete parsing InlineTable; in_match did not catch."
                )
                lprint("Returning blank roll area.")
                self.outcomes = [TableItem("1-{}. N/A".format(self.die))]
                return
            this_out = in_match.group(3)
            # _line_regex[1:] drops the leading anchor character so the
            # start of the NEXT entry can be found inside this outcome.
            next_match = re.search(_line_regex[1:], this_out)
            if next_match:
                tail = this_out[next_match.start():]
                this_out = this_out[:next_match.start()]
            else:
                tail = ""

            TI_text = in_match.group(1) + (
                in_match.group(2) if in_match.group(2) else "") + this_out
            try:
                self.outcomes.append(TableItem(TI_text))
            except Exception as e:
                lprint(
                    "Error building TableItem in inline table; item skipped.")
                lprint("Exception:", e)
Example no. 5
0
 def get_default_sources(self):
     '''Add the default sources: OP and top-level comments.

     Best-effort: a PM has no associated submission, so failures are
     logged and swallowed rather than raised.
     '''
     try:
         # Add OP
         self._maybe_add_source(self.origin.submission,
                                "this thread's original post")
         # Add top-level comments
         top_level_comments = self.reddit.get_submission(
             None, self.origin.submission.id).comments
         for item in top_level_comments:
             self._maybe_add_source(
                 item,
                 "[this]({}) comment by {}".format(item.permalink,
                                                   item.author))
     except Exception:
         # Narrowed from a bare except so SystemExit/KeyboardInterrupt
         # still propagate.
         lprint("Could not add default sources.  (PM without links?)")
Example no. 6
0
 def get_link_sources(self):
     '''Scan the request body for markdown links and add Reddit links
     as table sources, normalizing each URL first (desktop host, no
     .json suffix, with www., no trailing slash).
     '''
     # Raw strings: "\[", "\s", "\(" are invalid escapes in plain literals.
     links = re.findall(r"\[.*?\]\s*\(.*?\)", self.origin.body)
     for item in links:
         desc, href = re.search(r"\[(.*?)\]\s*\((.*?)\)", item).groups()
         href = href.strip()
         if "reddit.com" in href.lower():
             lprint("Fetching href: {}".format(href.lower()))
             if "m.reddit" in href.lower():
                 lprint("Removing mobile 'm.'")
                 href = href.lower().replace("m.reddit", "reddit", 1)
             if ".json" in href.lower():
                 lprint("Pruning .json and anything beyond.")
                 href = href[:href.find('.json')]
             if 'www' not in href.lower():
                 lprint("Injecting 'www.' to href")
                 href = href[:href.find("reddit.com")] + 'www.' + href[
                     href.find("reddit.com"):]
             href = href.rstrip("/")
             lprint("Processing href: {}".format(href))
             self._maybe_add_source(self.reddit.get_submission(href), desc)
Example no. 7
0
 def _parse(self):
     '''Parse self.text into outcome text, weight, and optional subtable.'''
     main_regex = re.search(_line_regex, self.text.strip(_trash))
     if not main_regex:
         return
     # Grab outcome
     self.outcome = main_regex.group(3).strip(_trash)
     # Get weight / ranges; a bare item (no range) weighs 1.
     if not main_regex.group(2):
         self.weight = 1
     else:
         try:
             start = int(main_regex.group(1).strip(_trash))
             stop = int(main_regex.group(2).strip(_trash))
             self.weight = stop - start + 1
         except (AttributeError, ValueError):
             # Missing (None) or non-numeric range bounds: default to 1.
             self.weight = 1
     # Identify if there is a subtable (an embedded die spec like "d6").
     # Raw string: "\d" in a plain literal is an invalid escape; search
     # once instead of twice.
     die_regex = re.search(r"[dD]\d+", self.outcome)
     if die_regex:
         try:
             self.inline_table = InlineTable(
                 self.outcome[die_regex.start():])
         except RuntimeError as e:
             lprint("Error in inline_table parsing ; table item full text:")
             lprint(self.text)
             lprint(e)
             self.outcome = self.outcome[:die_regex.start()].strip(_trash)
     # This strip might be redundant, but it is harmless.
     self.outcome = self.outcome.strip(_trash)
Example no. 8
0
def main(debug=False, search_word=''):
    '''main(debug=False)
    Logs into Reddit, looks for unanswered user mentions, and
    generates and posts replies.

    :param debug: currently unused; kept for interface compatibility
    :param search_word: optional word forwarded to scan_submissions
    '''
    # Initialize
    lprint("Begin main()")
    seen_by_sentinel = []
    # Core loop: any exception escapes so cron can restart the process.
    while True:
        try:
            lprint("Signing into Reddit.")
            r = sign_in()
            lprint('Read only mode? {}'.format(r.read_only))
            # Start one shy of the threshold so the first pass emits a
            # heartbeat immediately.
            trivial_passes_count = _trivial_passes_per_heartbeat - 1
            while True:
                # Mail processing is currently disabled.
                was_mail = False
                was_sub = scan_submissions(seen_by_sentinel, r, search_word)
                if not was_mail and not was_sub:
                    trivial_passes_count += 1
                if trivial_passes_count == _trivial_passes_per_heartbeat:
                    lprint(
                        "Heartbeat.  {} passes without incident (or first pass)."
                        .format(_trivial_passes_per_heartbeat))
                    trivial_passes_count = 0
                time.sleep(_sleep_between_checks)
        except Exception as e:
            lprint("Top level.  Allowing to die for cron to revive.")
            lprint("Error: {}".format(e))
            raise
        finally:
            # We would like to avoid large caching and delayed logging.
            # The original flush sat after the except block and was
            # unreachable (the inner loop never exits normally and the
            # except re-raises); a finally guarantees it runs.
            sys.stdout.flush()