Example #1
from cachetools import TTLCache


class RSSCache:
    """Thin wrapper around a TTL-bounded cache for RSS entries."""

    def __init__(self, cache_max_size, cache_ttl_seconds):
        self.cache = TTLCache(ttl=cache_ttl_seconds, maxsize=cache_max_size)

    def update_cache(self, key, value):
        self.cache.update({key: value})

    def get_from_cache(self, key):
        # Returns None if the key is missing or its TTL has expired.
        return self.cache.get(key)
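A minimal usage sketch, with a hypothetical feed URL and payload: entries live in the cache for the configured TTL and are evicted silently afterwards, so a miss and an expired entry both come back as None.

cache = RSSCache(cache_max_size=256, cache_ttl_seconds=900)
cache.update_cache("https://example.com/feed.xml", {"title": "Hello"})  # hypothetical entry
entry = cache.get_from_cache("https://example.com/feed.xml")  # None once 900 s have elapsed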
Example #2
import os
from datetime import datetime, timedelta
from typing import List, Optional

import pytz
import tweepy
from cachetools import TTLCache
from tweepy import TweepError  # tweepy < 4; v4 renamed this to TweepyException

# Scraper and Set (the interval helper) are project-local modules; log is the module logger.


class Bot:
    """The Twitter bot."""

    # Consts
    CALL_TEXT = "{} second encrypted call at {}"
    HASHTAGS = "#SeattleEncryptedComms"
    TWEET_PADDING = 20
    BASE_URL = "https://api.openmhz.com/kcers1b/calls/newer?time={}&filter-type=talkgroup&filter-code=44912,45040,45112,45072,45136"

    # DEBUG URL TO GET A LOT OF API RESPONSES
    # BASE_URL = "https://api.openmhz.com/kcers1b/calls/newer?time={}"

    def __init__(self) -> None:
        """Initializes the class."""
        self.callThreshold = int(os.getenv("CALL_THRESHOLD", 1))
        self.debug = os.getenv("DEBUG", "true").lower() == "true"
        self.reportLatency = os.getenv("REPORT_LATENCY",
                                       "false").lower() == "true"
        self.window_minutes = int(os.getenv("WINDOW_M", 5))
        self.timezone = pytz.timezone(os.getenv("TIMEZONE", "US/Pacific"))
        # The effective look-back is this value plus lag compensation, e.g. 300 + 45 = 345 seconds.
        self.lookback = int(os.getenv("LOOKBACK_S", 300))

        self.cachedTweet: Optional[int] = None
        self.cachedTime: Optional[datetime] = None
        self.cache = TTLCache(maxsize=100, ttl=self.lookback)
        self.scraper = Scraper.Instance(self.BASE_URL, self.lookback)

        self.latency = [timedelta(seconds=0)]

        if not self.debug:
            # Does not need to be saved for later.
            # If the keys aren't in env this will still run.
            auth = tweepy.OAuthHandler(os.getenv("CONSUMER_KEY", ""),
                                       os.getenv("CONSUMER_SECRET", ""))
            auth.set_access_token(os.getenv("ACCESS_TOKEN_KEY", ""),
                                  os.getenv("ACCESS_TOKEN_SECRET", ""))
            self.api = tweepy.API(auth)
            # Test the authentication. This will gracefully fail if the keys aren't present.
            try:
                self.api.rate_limit_status()
            except TweepError as e:
                if e.api_code == 215:
                    log.error("No keys or bad keys")
                else:
                    log.error("Other API error: {}".format(e))
                exit(1)

        self.interval = Set.Interval(30, self._check)

    def _kill(self) -> None:
        """This kills the c̶r̶a̶b̶  bot."""
        self.interval.cancel()
        exit(0)

    def _getUniqueCalls(self, calls: list) -> list:
        """Filters the return from the scraper to only tweet unique calls.
        Works by checking if the cache already has that call ID.
        Args:
            calls (list): The complete list of calls scraped.
        Returns:
            list: A filtered list of calls.
        """
        res: List[dict] = []
        for call in calls:
            # If the call is already in the cache, skip it.
            if call["_id"] in self.cache:
                continue
            # If it isn't, cache it and return it.
            else:
                # Might want to actually store something here? Who knows.
                self.cache.update({call["_id"]: 0})
                res.append(call)
        return res

    def _check(self) -> None:
        """Checks the API and sends a tweet if needed."""
        try:
            log.info(f"Checking!: {datetime.now()}")
            try:
                json = self.scraper.getJSON()
                calls = self._getUniqueCalls(json["calls"])
                log.info(f"Found {len(calls)} calls.")
                if len(calls) > 0:
                    self._postTweet(calls)
            except TypeError as e:
                if json is None:
                    # We already have an error message from the scraper
                    return
                log.exception(e)
                return
        except KeyboardInterrupt:
            # Practically unreachable because this runs on a timer thread; catching the interrupt on the main thread would be the robust fix, but that adds complexity.
            self._kill()

        if self.reportLatency:
            # Start sum() from a timedelta; the default int 0 cannot be added to a timedelta.
            total = sum(self.latency, timedelta()).total_seconds()
            avg = round(total / len(self.latency), 3)
            log.info(f"Average latency for the last 100 calls: {avg} seconds")

    def _postTweet(self, calls: list) -> None:
        """Posts a tweet.
        Args:
            calls (list): The call objects to post about.
        """

        # Filter to make sure the calls are actually recent; the API can occasionally return calls that are hours old all at once. Also drop calls under the length threshold.
        filteredCalls: List[dict] = []
        for call in calls:
            diff = datetime.now(pytz.utc) - datetime.strptime(
                call["time"], "%Y-%m-%dT%H:%M:%S.000%z")
            if abs(diff.total_seconds()) < 1800:  # Only calls from the last 30 minutes.
                if call["len"] < self.callThreshold:
                    log.debug(
                        f"Call of size {call['len']} below threshold ({self.callThreshold})"
                    )
                    continue
                filteredCalls.append(call)

                if self.reportLatency:
                    # Store latency
                    self.latency.append(diff)
                    if len(self.latency) > 100:
                        self.latency.pop(0)

        if len(filteredCalls) == 0:
            # If there's nothing to post, simply leave
            return

        msgs = self._generateTweets(filteredCalls)

        if self.debug:
            msg = " | ".join(msgs)
            log.debug(f"Would have posted: {msg}")
            return

        # If there is a cached tweet but the last post is older than the window, drop the reference so the next tweet starts a new thread.
        if (self.cachedTime is not None
                and self.cachedTime + timedelta(minutes=self.window_minutes) <=
                datetime.now()):
            self.cachedTweet = None

        try:
            if self.cachedTweet is not None:
                for msg in msgs:
                    # Each post stores the new status ID, so the messages chain as replies.
                    self.cachedTweet = self.api.update_status(
                        msg, self.cachedTweet).id
            else:
                for index, msg in enumerate(msgs):
                    if index == 0:
                        # Since there isn't a cached tweet yet we have to send a non-reply first
                        self.cachedTweet = self.api.update_status(msg).id
                    else:
                        self.cachedTweet = self.api.update_status(
                            msg, self.cachedTweet).id
            self.cachedTime = datetime.now()
        except tweepy.TweepError as e:
            log.exception(e)

    def _timeString(self, call: dict) -> str:
        """Generates a time code string for a call.
        Args:
            call (dict): The call to get time from.
        Returns:
            str: A timestamp string in I:M:S am/pm format.
        """
        # Get time from the call.
        date = datetime.strptime(call["time"], "%Y-%m-%dT%H:%M:%S.000%z")
        # Convert the UTC timestamp into the configured local timezone.
        localized = date.replace(tzinfo=pytz.utc).astimezone(self.timezone)
        normalized = self.timezone.normalize(localized)
        # %#I drops the leading zero on Windows; on Linux/macOS use %-I instead.
        return normalized.strftime("%#I:%M:%S %p")

    def _chunk(self, callStrings: list) -> list:
        """Chunks tweets into an acceptable length.

        Chunking. Shamelessly stolen from `SeattleDSA/signal_scanner_bot/twitter.py` :)

        Args:
            callStrings (list): List of strings derived from calls.

        Returns:
            list: A list of tweet strings to post
        """
        tweetList: List[str] = []
        baseIndex = 0

        # Instead of splitting on words, split along call boundaries.
        subTweet: str = ""
        for index in range(len(callStrings)):
            if len(tweetList) == 0:
                subTweet = (", ".join(callStrings[baseIndex:index]) + " ... " +
                            self.HASHTAGS)
            elif index < len(callStrings):
                subTweet = ", ".join(callStrings[baseIndex:index]) + " ..."
            elif index == len(callStrings):
                subTweet = ", ".join(callStrings[baseIndex:index])

            if len(subTweet) > 280 - self.TWEET_PADDING:
                lastIndex = index - 1
                tweetList.append(", ".join(callStrings[baseIndex:lastIndex]) +
                                 " ...")
                baseIndex = lastIndex

        tweetList.append(", ".join(callStrings[baseIndex:]))
        listLength = len(tweetList)
        for index in range(len(tweetList)):
            if index == 0:
                tweetList[
                    index] += f" {self.HASHTAGS} {index + 1}/{listLength}"
            else:
                tweetList[index] += f" {index + 1}/{listLength}"

        return tweetList

    def _generateTweets(self, calls: list) -> list:
        """Generates tweet messages.
        Args:
            calls (list): The calls to tweet about.
        Returns:
            list: The tweet messages, hopefully right around the character limit.
        """
        callStrings: List[str] = []

        # First, take all of the calls and turn them into strings.
        for call in calls:
            callStrings.append(
                self.CALL_TEXT.format(
                    call["len"],
                    self._timeString(call),
                ))

        tweet = ", ".join(callStrings) + " " + self.HASHTAGS
        # If we don't have to chunk we can just leave.
        if len(tweet) <= 280:
            return [tweet]
        else:
            tweetList = self._chunk(callStrings)

        return tweetList
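The deduplication pattern inside _getUniqueCalls is the reusable core of this example: a TTLCache keyed by call ID acts as a sliding-window "seen" set, so repeated API results are dropped until their entries expire. A standalone sketch under that reading, with hypothetical call payloads:

from cachetools import TTLCache

seen = TTLCache(maxsize=100, ttl=300)  # remember call IDs for 5 minutes

def unique(calls):
    fresh = []
    for call in calls:
        if call["_id"] in seen:
            continue  # already handled within the TTL window
        seen[call["_id"]] = True
        fresh.append(call)
    return fresh

print(len(unique([{"_id": "a"}, {"_id": "b"}])))  # 2
print(len(unique([{"_id": "a"}, {"_id": "c"}])))  # 1: "a" is still cached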
Example #3
from cachetools import TTLCache

# Assumptions: Server is the project's JSON-RPC client (e.g. jsonrpc_requests),
# cached is a zero-argument memoization decorator defined elsewhere in the
# project (cachetools' own cached() requires an explicit cache argument), and
# PROPERTIES and logger are module-level definitions.


class KodiClient(object):
    def __init__(self, config):
        self._cache = TTLCache(maxsize=2048, ttl=3600)
        if 'user' in config and 'password' in config:
            self.auth = (config['user'], config['password'])
        else:
            self.auth = None
        self.host = config['host']
        self.port = config['port']
        self.chunk_size = 750
        self._api = Server(
            url='http://{host}:{port}/jsonrpc'.format(**config),
            auth=self.auth)

    def _make_generator(self, method, data_key, **params):
        # Page through a paginated JSON-RPC list endpoint, yielding items one chunk at a time.
        logger.debug("Fetching first chunk of {}".format(data_key))
        params.update({'limits': {'start': 0, 'end': self.chunk_size}})
        resp = method(**params)
        for d in resp[data_key]:
            yield d
        num_total = resp['limits']['total']
        cur_start = self.chunk_size
        while cur_start < num_total:
            params['limits']['start'] = cur_start
            params['limits']['end'] = cur_start + self.chunk_size
            logger.debug("Fetching next chunk from #{}".format(cur_start))
            resp = method(**params)
            for d in resp[data_key]:
                yield d
            cur_start += self.chunk_size

    @cached()
    def get_artists(self):
        artists = list(self._make_generator(
            self._api.AudioLibrary.GetArtists, 'artists',
            properties=PROPERTIES['artist']))
        self._cache.update({'artist.{}'.format(a['artistid']): a
                            for a in artists})
        return artists

    def get_artist(self, artist_id):
        artist_id = int(artist_id)
        cached = self._cache.get('artist.{}'.format(artist_id))
        if cached is None:
            try:
                artist = self._api.AudioLibrary.GetArtistDetails(
                    artistid=artist_id,
                    properties=PROPERTIES['artist'])['artistdetails']
                self._cache['artist.{}'.format(artist_id)] = artist
                return artist
            except Exception:
                return None
        else:
            return cached

    @cached()
    def get_albums(self, artist_id=None, recently_added=False):
        if recently_added:
            return self._api.AudioLibrary.GetRecentlyAddedAlbums(
                properties=PROPERTIES['album'])['albums']
        if artist_id is not None:
            artist_id = int(artist_id)
        params = {'properties': PROPERTIES['album'],
                  'data_key': 'albums'}
        if artist_id:
            params['filter'] = {'artistid': artist_id}
        albums = list(self._make_generator(
            self._api.AudioLibrary.GetAlbums, **params))
        self._cache.update({'album.{}'.format(a['albumid']): a
                            for a in albums})
        return albums

    def get_album(self, album_id):
        album_id = int(album_id)
        cached = self._cache.get('album.{}'.format(album_id))
        if cached is None:
            try:
                album = self._api.AudioLibrary.GetAlbumDetails(
                    albumid=album_id,
                    properties=PROPERTIES['album'])['albumdetails']
                self._cache['album.{}'.format(album_id)] = album
                return album
            except Exception:
                self._cache['album.{}'.format(album_id)] = None
                return None
        else:
            return cached


    @cached()  # First-level cache for accessing all tracks
    def get_songs(self, album_id=None):
        if album_id is not None:
            album_id = int(album_id)
        params = {'properties': PROPERTIES['song'],
                  'data_key': 'songs'}
        if album_id:
            params['filter'] = {'albumid': album_id}
        songs = list(self._make_generator(
            self._api.AudioLibrary.GetSongs, **params))
        # Second level cache so that get_song doesn't have to make an API call
        self._cache.update({'song.{}'.format(s['songid']): s for s in songs})
        return songs

    def get_song(self, song_id):
        song_id = int(song_id)
        cached = self._cache.get('song.{}'.format(song_id))
        if cached is None:
            try:
                song = self._api.AudioLibrary.GetSongDetails(
                    songid=song_id,
                    properties=PROPERTIES['song'])['songdetails']
                self._cache['song.{}'.format(song_id)] = song
                return song
            except Exception:
                self._cache['song.{}'.format(song_id)] = None
                return None
        else:
            return cached

    @cached()
    def get_url(self, filepath):
        path = self._api.Files.PrepareDownload(filepath)
        url = "http://{}{}:{}/{}".format(
            "{}:{}@".format(*self.auth) if self.auth else '',
            self.host, self.port, path['details']['path'])
        self._cache['trackurl.{}'.format(url)] = filepath
        return url
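A minimal usage sketch, assuming a reachable Kodi instance; the host, port, and credentials below are placeholders:

config = {"host": "192.168.1.50", "port": 8080,
          "user": "kodi", "password": "secret"}
client = KodiClient(config)

artists = client.get_artists()                      # one paginated JSON-RPC walk, then memoized
artist = client.get_artist(artists[0]["artistid"])  # served from the second-level TTL cache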
Example #4
import logging

import requests
from cachetools import TTLCache

log = logging.getLogger(__name__)


class MicrosoftGraphHelper:
    def __init__(self, tenant_id, client_id, client_secret):
        # Expire after 55 minutes so the roughly one-hour token is always fresh.
        self.__cache = TTLCache(maxsize=1, ttl=55 * 60)

        self.__tenant_id = tenant_id
        self.__client_id = client_id  # No trailing comma: that would store a one-element tuple.
        self.__client_secret = client_secret
        # Warm the cache with an initial access token.
        self.__get_cache('microsoft_security_graph_access_token')

    def __set_cache(self, d):
        # TTLCache.update accepts a mapping directly; no per-item loop needed.
        self.__cache.update(d)

    def __get_cache(self, key):
        if key not in self.__cache:
            self.__set_cache({key: self.__refresh_access_token()})
        return self.__cache.get(key)

    def __refresh_access_token(self):
        token_url = 'https://login.microsoftonline.com/{}/oauth2/v2.0/token'.format(
            self.__tenant_id)
        post_data = {
            "client_id": self.__client_id,
            "scope": ["https://graph.microsoft.com/.default"],
            "client_secret": self.__client_secret,
            "grant_type": "client_credentials"
        }
        r = requests.post(token_url, data=post_data)
        log.debug(r.content)
        payload = r.json()
        return payload.get("access_token")

    def check_status_code(self, response):
        if 200 <= response.status_code <= 299:
            return True
        # Access token has expired, request a new one
        elif response.status_code == 401:
            log.debug(response.content)
            access_token = self.__refresh_access_token()
            self.__set_cache(
                {"microsoft_security_graph_access_token": access_token})
            return False
        else:
            raise ValueError("Invalid response from Microsoft Security Graph")

    def microsoft_graph_request(self, method, url, headers, json=None):
        r = None
        for i in range(2):
            if method == "GET":
                r = requests.get(url, headers=headers)
            elif method == "PATCH":
                r = requests.patch(url, headers=headers, json=json)
            else:
                raise ValueError("{} not implemented.".format(method))

            if self.check_status_code(r):
                break
            # If it fails a second time, something more serious is wrong, e.g. bad credentials or a bad query.
            elif i == 1:
                log.info(r.content)
                return False
        return r

    def get_access_token(self):
        return self.__get_cache("microsoft_security_graph_access_token")

    def clear_cache(self):
        self.__cache.clear()
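A minimal usage sketch; the tenant, client, and secret values are placeholders, and the alerts URL is just one example of a Graph endpoint:

helper = MicrosoftGraphHelper("<tenant-id>", "<client-id>", "<client-secret>")
token = helper.get_access_token()  # fetched once, then served from the 55-minute cache
response = helper.microsoft_graph_request(
    "GET",
    "https://graph.microsoft.com/v1.0/security/alerts",
    headers={"Authorization": "Bearer {}".format(token)},
)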
Example #5
import threading
import time
from collections.abc import MutableMapping

from cachetools import TTLCache

# CacheMonitor is assumed to be a project-local helper that periodically calls
# expire() on the cache to evict stale entries.


class AutoTTLCache(MutableMapping):
    def __init__(self,
                 items=None,
                 *,
                 maxsize,
                 ttl,
                 timer=time.monotonic,
                 getsizeof=None):
        self._cache_lock = threading.Lock()
        self._cache = TTLCache(maxsize, ttl, timer=timer, getsizeof=getsizeof)
        if items is not None:
            self._cache.update(items)
        self._monitor = CacheMonitor(self)

    @property
    def ttl(self):
        with self._cache_lock:
            return self._cache.ttl

    @property
    def maxsize(self):
        with self._cache_lock:
            return self._cache.maxsize

    @property
    def timer(self):
        with self._cache_lock:
            return self._cache.timer

    def expire(self):
        with self._cache_lock:
            self._cache.expire()

    def __contains__(self, key):
        with self._cache_lock:
            return key in self._cache

    def __setitem__(self, k, v):
        with self._cache_lock:
            self._cache[k] = v

    def __delitem__(self, k):
        with self._cache_lock:
            del self._cache[k]

    def __getitem__(self, k):
        with self._cache_lock:
            return self._cache[k]

    def __len__(self) -> int:
        with self._cache_lock:
            return len(self._cache)

    def __iter__(self):
        with self._cache_lock:
            keys = list(self._cache)
        yield from keys

    # TODO: __reduce__ and __setstate__

    def __repr__(self):
        return f"{type(self).__name__}(max_size={self.maxsize}, ttl={self.ttl})"

    def clear(self):
        with self._cache_lock:
            self._cache.clear()

    def get(self, *args, **kwargs):
        with self._cache_lock:
            return self._cache.get(*args, **kwargs)

    def pop(self, *args, **kwargs):
        with self._cache_lock:
            return self._cache.pop(*args, **kwargs)

    def setdefault(self, *args, **kwargs):
        with self._cache_lock:
            return self._cache.setdefault(*args, **kwargs)

    def popitem(self):
        with self._cache_lock:
            return self._cache.popitem()
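A minimal usage sketch, ignoring the CacheMonitor internals: the lock makes the mapping safe to share across threads, and an entry silently disappears once its TTL elapses.

cache = AutoTTLCache(maxsize=128, ttl=2.0)
cache["token"] = "abc123"           # hypothetical value
assert cache.get("token") == "abc123"

time.sleep(2.5)                     # wait past the 2-second TTL
assert cache.get("token") is None   # the expired entry is gone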