Example no. 1
0
def prefer_envar(configs: dict) -> dict:
  """Override config values with matching environment variables.

  For each key in *configs*, look up the environment variable named
  f"{ENVAR_PREFIX}{key}".lower(). When that variable is set to a
  non-empty value, replace the config entry with it; otherwise leave
  the existing value untouched.

  Args:
    configs: mapping of config names to values; mutated in place.

  Returns:
    The same *configs* dict, with any env-var overrides applied.
  """
  for config in list(configs):
    config_envar = f"{ENVAR_PREFIX}{config}".lower()
    # Read the variable once instead of twice (the original called
    # os.environ.get() for both the test and the assignment).
    value = os.environ.get(config_envar)
    if value:
      configs[config] = value
      log.info(f"loading {config_envar} from envar. Value: {configs.get(config)}")
    else:
      log.debug(f"no environment config for: {config_envar}")

  return configs
Example no. 2
0
    def init(self):
        """Seed the cobe brain database until it reaches the configured minimum size.

        Verifies that the cobe db file exists (exits the process otherwise),
        then repeatedly picks a random subreddit, pulls up to 500 comments
        from pushshift, and feeds suitable ones to the brain. Sets
        ``self.ready = True`` once the db file reaches
        ``cobe_min_db_size``.
        """
        log.info("using cobe to generate comments")
        main_db = self.config.get("cobe_main_db")

        # make sure db was initialized correctly
        if os.path.isfile(main_db):
            # set the initial size
            self.size = os.path.getsize(main_db)
        else:
            log.info(f"cobe db failed to initialize. exiting")
            sys.exit()

        log.debug('filling cobe database for commenting')
        # loop through learning comments until we reach the min db size
        while self.size <= tobytes(self.config.get("cobe_min_db_size")):

            log.info(
                f"cobe db size is: {str(bytesto(self.size, 'm'))}mb, need {self.config.get('cobe_min_db_size')} - learning..."
            )

            # just learn from random subreddits for now
            subreddit = get_subreddit(getsubclass=True)

            log.info(f"learning from /r/{subreddit}")

            # get the comment generator function from pushshift
            comments = self.psapi.get_comments(subreddit)

            # go through 500 comments per subreddit
            for _ in range(500):
                # get the comment from the generator function
                try:
                    comment = next(comments)
                except StopIteration:
                    log.info(f"end of comments")
                    # BUGFIX: without this break the loop kept calling
                    # next() on the exhausted generator and re-processed
                    # the stale `comment` (or raised NameError when the
                    # generator was empty from the start).
                    break

                # bot responses are better when it learns from short comments
                if len(comment.body) < 240:
                    log.debug(
                        f"learning comment: {comment.body.encode('utf8')}")

                    # only learn comments that don't contain an avoid word
                    if not any(word in comment.body for word in AVOID_WORDS):
                        self.brain.learn(comment.body.encode("utf8"))

            # update the class size variable so the while loop
            # knows when to break
            self.size = os.path.getsize(main_db)

        log.info(
            f"database min size ({self.config.get('cobe_min_db_size')}) reached"
        )
        # signal to the rest of the bot that the brain is usable
        self.ready = True
Example no. 3
0
import random
from apis import reddit_api
from logs.logger import log
from config.reddit.reddit_sub_lists import REDDIT_APPROVED_SUBS
from config.common_config import CONFIG_ROOT

# Load the avoid-lists once at import time; the `with` blocks close the
# files automatically (the explicit .close() calls were redundant).
with open(f"{CONFIG_ROOT}/reddit/reddit_avoid_subs.txt", "r") as subfile:
    AVOID_SUBS = subfile.read().splitlines()

with open(f"{CONFIG_ROOT}/reddit/reddit_avoid_words.txt", "r") as wordfile:
    AVOID_WORDS = wordfile.read().splitlines()

log.debug(f"avoiding subs: {AVOID_SUBS}")


def get_subreddit(nsfw=False, getsubclass=False):

    # if the subreddit list is being used jut return one from there
    if REDDIT_APPROVED_SUBS:
        log.info(f"picking subreddit from approved list")
        subreddit = reddit_api.subreddit(
            random.choice(REDDIT_APPROVED_SUBS).strip())
        log.info(f"using subreddit: {subreddit.display_name}")
    else:
        log.info(f"picking a random subreddit")
        # otherwise we'll do some logic to get a random subreddit
        subreddit_ok = False
        while not subreddit_ok:
            subreddit = reddit_api.random_subreddit(nsfw=nsfw)