# Assumed module-level imports; TweetManager, Pixiv, SauceNao, ATraceMoe,
# config and api are project-specific and defined elsewhere in the repo.
import logging
from typing import Optional

import tweepy
from twython import Twython


def __init__(self):
    self.log = logging.getLogger(__name__)

    # Tweet Cache Manager
    self.twitter = TweetManager()
    self.twython = Twython(config.get('Twitter', 'consumer_key'), config.get('Twitter', 'consumer_secret'),
                           config.get('Twitter', 'access_token'), config.get('Twitter', 'access_secret'))

    self.anime_link = config.get('SauceNao', 'source_link', fallback='anidb').lower()
    self.nsfw_previews = config.getboolean('TraceMoe', 'nsfw_previews', fallback=False)
    self.failed_responses = config.getboolean('SauceNao', 'respond_to_failed', fallback=True)
    # Skip blank entries so an unset option doesn't raise ValueError on int('')
    self.ignored_indexes = [int(i) for i in config.get('SauceNao', 'ignored_indexes', fallback='').split(',')
                            if i.strip()]

    # Pixiv
    self.pixiv = Pixiv()

    # Cache some information about ourselves
    self.my = api.me()
    self.log.info(f"Connected as: {self.my.screen_name}")

    # Image URLs are md5 hashed and cached here to prevent duplicate API queries. This is cleared every 24 hours.
    # I'll update this in the future to use a real caching mechanism (database or redis)
    self._cached_results = {}

    # A cached list of IDs for parent posts we've already processed.
    # Used in the check_monitored() method to prevent re-posting sauces when posts are re-tweeted
    self._posts_processed = []

    # The ID cutoffs; we populate these once via an initial query at startup
    try:
        self.mention_id = next(tweepy.Cursor(api.mentions_timeline, tweet_mode='extended', count=1).items(1)).id
    except StopIteration:
        self.mention_id = 0

    try:
        self.self_id = next(tweepy.Cursor(api.user_timeline, tweet_mode='extended', count=1).items(1)).id
    except StopIteration:
        self.self_id = 0

    self.monitored_since = {}
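
# The two ID lookups above share the same fetch-latest-or-zero pattern. A minimal
# sketch of a helper that could fold them together; the name _latest_status_id is
# illustrative, not part of the project, and it assumes the same tweepy Cursor API
# used above.
def _latest_status_id(timeline_method) -> int:
    """Return the newest status ID from a timeline method, or 0 if the timeline is empty."""
    try:
        # count=1 keeps the initial query cheap; items(1) yields at most one status
        return next(tweepy.Cursor(timeline_method, tweet_mode='extended', count=1).items(1)).id
    except StopIteration:
        # A brand-new account has no mentions or statuses yet
        return 0

# Hypothetical usage in __init__:
#   self.mention_id = _latest_status_id(api.mentions_timeline)
#   self.self_id = _latest_status_id(api.user_timeline)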
def __init__(self): """ Handles performing cache and API queries on tweepy objects """ self.log = logging.getLogger(__name__) self.my = api.me()

def __init__(self):
    self.log = logging.getLogger(__name__)

    # Tweet Cache Manager
    self.twitter = TweetManager()
    self.twython = Twython(config.get('Twitter', 'consumer_key'), config.get('Twitter', 'consumer_secret'),
                           config.get('Twitter', 'access_token'), config.get('Twitter', 'access_secret'))

    # SauceNao
    self.minsim_mentioned = float(config.get('SauceNao', 'min_similarity_mentioned', fallback=50.0))
    self.minsim_monitored = float(config.get('SauceNao', 'min_similarity_monitored', fallback=65.0))
    self.minsim_searching = float(config.get('SauceNao', 'min_similarity_searching', fallback=70.0))
    self.persistent = config.getboolean('Twitter', 'enable_persistence', fallback=False)
    self.anime_link = config.get('SauceNao', 'source_link', fallback='anidb').lower()
    self.sauce = SauceNao(api_key=config.get('SauceNao', 'api_key', fallback=None),
                          min_similarity=min(self.minsim_mentioned, self.minsim_monitored, self.minsim_searching),
                          priority=[21, 22, 5])

    # Trace.moe
    self.tracemoe = None  # type: Optional[ATraceMoe]
    if config.getboolean('TraceMoe', 'enabled', fallback=False):
        self.tracemoe = ATraceMoe(config.get('TraceMoe', 'token', fallback=None))
    self.nsfw_previews = config.getboolean('TraceMoe', 'nsfw_previews', fallback=False)

    # Pixiv
    self.pixiv = Pixiv()

    # Cache some information about ourselves
    self.my = api.me()
    self.log.info(f"Connected as: {self.my.screen_name}")

    # Image URLs are md5 hashed and cached here to prevent duplicate API queries. This is cleared every 24 hours.
    # I'll update this in the future to use a real caching mechanism (database or redis)
    self._cached_results = {}

    # A cached list of IDs for parent posts we've already processed.
    # Used in the check_monitored() method to prevent re-posting sauces when posts are re-tweeted
    self._posts_processed = []

    # The ID cutoff; we populate this once via an initial query at startup
    try:
        self.since_id = next(tweepy.Cursor(api.mentions_timeline, tweet_mode='extended', count=1).items(1)).id
    except StopIteration:
        self.since_id = 0

    self.monitored_since = {}
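
# For reference, all of the constructors above read from a single INI-style config.
# A minimal sketch of the sections and keys they expect, loaded via configparser;
# the values are placeholders, and only the section and key names come from the
# config.get()/getboolean() calls above.
import configparser

_example_config = configparser.ConfigParser()
_example_config.read_string("""
[Twitter]
consumer_key    = YOUR_CONSUMER_KEY
consumer_secret = YOUR_CONSUMER_SECRET
access_token    = YOUR_ACCESS_TOKEN
access_secret   = YOUR_ACCESS_SECRET
enable_persistence = false

[SauceNao]
api_key = YOUR_SAUCENAO_API_KEY
min_similarity_mentioned = 50.0
min_similarity_monitored = 65.0
min_similarity_searching = 70.0
source_link = anidb
respond_to_failed = true
ignored_indexes =

[TraceMoe]
enabled = false
token =
nsfw_previews = false
""")

# Keys omitted from the file fall back cleanly, matching the fallback= arguments above
assert _example_config.get('SauceNao', 'nonexistent_key', fallback=None) is None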