def get_info(username=None, use_fallback=True):
    """
    Get the Twitch info for a particular user or channel.

    Defaults to the stream channel if not otherwise specified.

    For response object structure, see:
    https://github.com/justintv/Twitch-API/blob/master/v3_resources/channels.md#example-response

    May throw exceptions on network/Twitch error.
    """
    if username is None:
        username = config['channel']

    # The /streams endpoint only has data while the channel is live.
    stream_payload = json.loads(
        utils.http_request("https://api.twitch.tv/kraken/streams/%s" % username))
    stream = stream_payload.get('stream')
    info = stream and stream.get('channel')
    if info:
        info['live'] = True
        info['viewers'] = stream.get('viewers')
        info['stream_created_at'] = stream.get('created_at')
        return info

    if not use_fallback:
        return None

    # Channel is offline; fall back to the static channel record instead.
    info = json.loads(
        utils.http_request("https://api.twitch.tv/kraken/channels/%s" % username))
    info['live'] = False
    return info
def new_poll(lrrbot, conn, event, respond_to, multi, timeout, poll_id, title, options):
    """
    Command: !poll N http://strawpoll.me/ID
    Command: !poll N TITLE: OPTION1; OPTION2
    Command: !multipoll N TITLE: OPTION1; OPTION2
    Section: misc

    Start a new Strawpoll poll. Post results in N seconds. Multiple polls can
    be active at the same time.
    """
    if poll_id is not None:
        # Reusing an existing strawpoll: just fetch its title for the announcement.
        existing = json.loads(
            utils.http_request("http://strawpoll.me/api/v2/polls/%s" % poll_id))
        title = existing["title"]
    else:
        if title is None:
            title = "LoadingReadyLive poll"
        # Options may be separated by ';', then ',', then whitespace (in that priority).
        if ';' in options:
            options = [part.strip() for part in options.split(';')]
        elif ',' in options:
            options = [part.strip() for part in options.split(',')]
        else:
            options = options.split()
        payload = json.dumps({"options": options, "title": title, "multi": multi is not None})
        created = utils.http_request(
            "http://strawpoll.me/api/v2/polls", payload, "POST",
            headers={"Content-Type": "application/json"})
        poll_id = json.loads(created)["id"]
    timeout = DEFAULT_TIMEOUT if timeout is None else int(timeout)
    end = time.time() + int(timeout)
    lrrbot.polls += [(end, title, poll_id, respond_to)]
    conn.privmsg(respond_to, "New poll: %s (http://strawpoll.me/%s): %s from now" % (
        title, poll_id, utils.nice_duration(timeout, 1)))
def main():
    """Push staged stream highlights into the Google Docs highlights spreadsheet."""
    # Don't touch the highlight list while the stream is running.
    if twitch.get_info()["live"]:
        print("Stream is live.")
        return
    staged = send_bot_command("get_data", {"key": "staged_highlights"})
    if argv.test:
        print("Staged highlights: %r" % staged)
    if staged is None:
        staged = []
    # Resolve each staged entry against the Twitch video archive; drop misses.
    resolved = [entry for entry in map(twitch_lookup, staged) if entry is not None]
    if not resolved:
        return

    token = get_oauth_token(["https://spreadsheets.google.com/feeds"])
    headers = {"Authorization": "%(token_type)s %(access_token)s" % token}
    url = "https://spreadsheets.google.com/feeds/worksheets/%s/private/full" % SPREADSHEET
    tree = xml.dom.minidom.parseString(utils.http_request(url, headers=headers))
    worksheet = next(iter(tree.getElementsByTagName("entry")))
    list_feed = find_schema(worksheet, "http://schemas.google.com/spreadsheets/2006#listfeed")
    if list_feed is None:
        print("List feed missing.")
        return
    list_feed = xml.dom.minidom.parseString(utils.http_request(list_feed, headers=headers))
    post_url = find_schema(list_feed, "http://schemas.google.com/g/2005#post")
    if post_url is None:
        print("POST URL missing.")
        return

    for highlight in resolved:
        # Build one Atom <entry> per highlight, with gsx: fields matching the
        # spreadsheet's column headers.
        doc = xml.dom.minidom.getDOMImplementation().createDocument(None, "entry", None)
        root = doc.documentElement
        root.setAttribute("xmlns", "http://www.w3.org/2005/Atom")
        root.setAttribute("xmlns:gsx", "http://schemas.google.com/spreadsheets/2006/extended")
        root.appendChild(new_field(doc, "SHOW", highlight["title"]))
        root.appendChild(new_field(doc, "QUOTE or MOMENT", highlight["description"]))
        root.appendChild(new_field(doc, "YOUTUBE VIDEO LINK", highlight["url"]))
        root.appendChild(new_field(doc, "ROUGH TIME THEREIN",
                                   "before " + utils.nice_duration(highlight["time"], 0)))
        root.appendChild(new_field(doc, "NOTES", "From chat user '%s'." % highlight["user"]))
        if argv.test:
            print("Command: %s" % doc.toxml())
        else:
            headers["Content-Type"] = "application/atom+xml"
            utils.http_request(post_url, headers=headers, data=doc.toxml(), method="POST")

    if not argv.test:
        send_bot_command("set_data", {"key": "staged_highlights", "value": []})
def viewers(lrrbot, conn, event, respond_to):
    """
    Command: !viewers
    Section: info

    Post the number of viewers currently watching the stream
    """
    stream_info = twitch.get_info()
    viewer_count = stream_info.get("viewers") if stream_info else None

    # Since we're using TWITCHCLIENT 3, we don't get join/part messages, so we
    # can't just use len(lrrbot.channels["#loadingreadyrun"].userdict) as that
    # dict won't be populated. Need to call this api instead.
    chatter_count = json.loads(utils.http_request(
        "http://tmi.twitch.tv/group/user/%s/chatters" % config["channel"]
    )).get("chatter_count")

    if viewer_count is None:
        viewer_part = "Stream is not live."
    else:
        viewer_part = "%d %s viewing the stream." % (
            viewer_count, "user" if viewer_count == 1 else "users")
    if chatter_count is None:
        chatter_part = "No-one in the chat."
    else:
        chatter_part = "%d %s in the chat." % (
            chatter_count, "user" if chatter_count == 1 else "users")
    conn.privmsg(respond_to, "%s %s" % (viewer_part, chatter_part))
def get_group_servers():
    """
    Get the secondary Twitch chat servers
    """
    raw = json.loads(utils.http_request(
        "https://chatdepot.twitch.tv/room_memberships",
        {'oauth_token': storage.data['twitch_oauth'][config['username']]},
        maxtries=1))

    def parse_server(spec):
        # "host:port", or bare "host" defaulting to the standard IRC port.
        if ':' not in spec:
            return spec, 6667
        pieces = spec.split(':')
        return pieces[0], int(pieces[1])

    endpoints = {parse_server(s) for m in raw['memberships'] for s in m['room']['servers']}
    # each server appears in this multiple times with different ports... pick
    # one port we prefer for each server
    ports_by_host = {}
    for host, port in endpoints:
        ports_by_host.setdefault(host, set()).add(port)

    def preferred_port(ports):
        # Prefer plain IRC, then anything that isn't an HTTP(S) port.
        if 6667 in ports:
            return 6667
        non_web = ports - {80, 443}
        if non_web:
            return random.choice(list(non_web))
        return random.choice(list(ports))

    servers = [(host, preferred_port(ports)) for host, ports in ports_by_host.items()]
    random.shuffle(servers)
    return servers
def get_oauth_token(scopes):
    """
    Obtain a Google OAuth2 access token for the given scopes, using the
    service-account credentials in keys.json via an RS256-signed JWT assertion.

    Raises Exception if Google returns an error response.
    """
    with open("keys.json") as keyfile:
        keys = json.load(keyfile)
    issued_at = int(time.time())
    jwt_header = json.dumps({"alg": "RS256", "typ": "JWT"}).encode("utf-8")
    jwt_claim = json.dumps({
        "iss": keys["client_email"],
        "scope": " ".join(scopes),
        "aud": "https://accounts.google.com/o/oauth2/token",
        "iat": issued_at,
        "exp": issued_at + 60 * 60,  # assertion valid for one hour
    }).encode("utf-8")
    signing_input = base64_encode(jwt_header) + b'.' + base64_encode(jwt_claim)
    signer = PKCS1_v1_5.new(RSA.importKey(keys["private_key"]))
    signature = signer.sign(SHA256.new(signing_input))
    jwt = (signing_input + b'.' + base64_encode(signature)).decode("utf-8")
    response = json.loads(utils.http_request(
        "https://accounts.google.com/o/oauth2/token",
        {"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer", "assertion": jwt},
        "POST"))
    if "error" in response:
        raise Exception(response["error"])
    return response
def get_oauth_token(scopes):
    """
    Exchange a service-account JWT (built from keys.json, signed with RS256)
    for a Google OAuth2 access token covering the requested scopes.

    Raises Exception on an error response from Google.
    """
    with open("keys.json") as f:
        creds = json.load(f)
    now = int(time.time())
    header_b = json.dumps({"alg": "RS256", "typ": "JWT"}).encode("utf-8")
    claim_b = json.dumps({
        "iss": creds["client_email"],
        "scope": " ".join(scopes),
        "aud": "https://accounts.google.com/o/oauth2/token",
        "iat": now,
        "exp": now + 60 * 60,  # one-hour lifetime
    }).encode("utf-8")
    to_sign = base64_encode(header_b) + b'.' + base64_encode(claim_b)
    rsa_key = RSA.importKey(creds["private_key"])
    digest = SHA256.new(to_sign)
    sig = PKCS1_v1_5.new(rsa_key).sign(digest)
    assertion = (to_sign + b'.' + base64_encode(sig)).decode("utf-8")
    body = {
        "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "assertion": assertion,
    }
    token = json.loads(
        utils.http_request("https://accounts.google.com/o/oauth2/token", body, "POST"))
    if "error" in token:
        raise Exception(token["error"])
    return token
def _get_public_key(cls):
    """
    Fetch the server's RSA public key, memoized per host URL.

    The server returns a bare base64 key body; we repair any missing base64
    padding and wrap it in PEM armor before parsing.
    """
    host_url = cls.base_url()
    if host_url in cls._host_public_key:
        return cls._host_public_key[host_url]
    key_b64 = http_request(host_url + "/getPublicKey").json()["result"]
    # deal with invalid base64 padding alignment
    remainder = len(key_b64) % 4
    if remainder:
        key_b64 += "=" * (4 - remainder)
    # java rsa public key in PKCS#8 format without header & footer
    pem = ("-----BEGIN PUBLIC KEY-----\n"
           "{}\n"
           "-----END PUBLIC KEY-----").format(key_b64)
    pub_key = rsa.PublicKey.load_pkcs1_openssl_pem(pem.encode())
    cls._host_public_key[host_url] = pub_key
    return pub_key
def get_display_name(nick):
    """
    Look up the Twitch display name (preferred capitalisation) for a user.

    Falls back to returning the plain nick unchanged if the lookup fails for
    any reason (network error, unexpected response, unknown user).
    """
    try:
        data = json.loads(
            utils.http_request("https://api.twitch.tv/kraken/users/%s" % nick))
        return data['display_name']
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. The best-effort fallback behavior is preserved.
        return nick
def download_data(self, *args, **kwargs):
    """Fetch data from the Alpha Vantage HTTP API.

    Keyword arguments are passed through as query-string parameters, with the
    configured API key appended. Returns the (status, data) pair produced by
    http_request.
    """
    # NOTE(review): urllib.urlencode is the Python 2 location of this function;
    # on Python 3 it lives at urllib.parse.urlencode — confirm the target
    # interpreter version for this module.
    options = kwargs
    options.update({'apikey': ALPHA_VANTAGE_API_KEY})
    params = urllib.urlencode(options)
    # url2 serves as the fallback endpoint supported by http_request.
    status, data = http_request(url=AV_URL, params=params, ishttps=True, url2=AV_URL2)
    return status, data
def new_poll(lrrbot, conn, event, respond_to, multi, timeout, poll_id, title, options):
    """
    Command: !poll N http://strawpoll.me/ID
    Command: !poll N TITLE: OPTION1; OPTION2
    Command: !multipoll N TITLE: OPTION1; OPTION2
    Section: misc

    Start a new Strawpoll poll. Post results in N seconds. Multiple polls can
    be active at the same time.
    """
    if poll_id is None:
        if title is None:
            title = "LoadingReadyLive poll"
        # Split the option list on the highest-priority separator present:
        # semicolons, then commas, then whitespace.
        for sep in (';', ','):
            if sep in options:
                opts = [piece.strip() for piece in options.split(sep)]
                break
        else:
            opts = options.split()
        options = opts
        body = json.dumps({"options": options, "title": title, "multi": multi is not None})
        resp = utils.http_request(
            "http://strawpoll.me/api/v2/polls", body, "POST",
            headers={"Content-Type": "application/json"})
        poll_id = json.loads(resp)["id"]
    else:
        # Existing poll: only its title is needed for the announcement.
        poll = json.loads(
            utils.http_request("http://strawpoll.me/api/v2/polls/%s" % poll_id))
        title = poll["title"]
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    else:
        timeout = int(timeout)
    end = time.time() + int(timeout)
    lrrbot.polls += [(end, title, poll_id, respond_to)]
    conn.privmsg(
        respond_to,
        "New poll: %s (http://strawpoll.me/%s): %s from now" % (
            title, poll_id, utils.nice_duration(timeout, 1)))
def check_polls(lrrbot, conn):
    """Announce results for any polls whose timer has expired, then drop them."""
    now = time.time()
    for end, title, poll_id, respond_to in lrrbot.polls:
        if end >= now:
            continue
        poll = json.loads(
            utils.http_request("https://strawpoll.me/api/v2/polls/%s" % poll_id))
        # Rank options by vote count, descending; break ties randomly.
        ranked = sorted(zip(poll["options"], poll["votes"]),
                        key=lambda pair: (pair[1], random.random()), reverse=True)
        summary = "; ".join(strawpoll_format(item) for item in enumerate(ranked))
        message = utils.shorten("Poll complete: %s: %s" % (poll["title"], summary), 450)
        conn.privmsg(respond_to, message)
    # Keep only polls that have not yet ended.
    lrrbot.polls = [entry for entry in lrrbot.polls if entry[0] >= now]
def _request(self, endpoint, **kwargs):
    """Send a request to the API host and return the decoded JSON body.

    When a login session is active (or this request *is* the login), any
    X-Auth-Token response header and the response cookies are persisted into
    the shared session so subsequent requests are authenticated.
    """
    response = http_request(uri="{}/{}".format(self.host(), endpoint.lstrip("/")),
                            session=self._session,
                            **kwargs)
    if self._login or "login" == endpoint:
        if "X-Auth-Token" in response.headers:
            self._session.headers.update(
                # Lower-case header name: presumably the server accepts either
                # casing — TODO confirm against the API's expectations.
                {"x-auth-token": response.headers["X-Auth-Token"]})
            self._session.cookies.update(response.cookies)
    return response.json()
def check_polls(lrrbot, conn):
    """Report results for every expired poll and remove them from the active list."""
    now = time.time()
    for end, title, poll_id, respond_to in lrrbot.polls:
        if end < now:
            url = "http://strawpoll.me/api/v2/polls/%s" % poll_id
            results = json.loads(utils.http_request(url))
            pairs = list(zip(results["options"], results["votes"]))
            # Highest vote count first; random() as a secondary key shuffles ties.
            pairs.sort(key=lambda p: (p[1], random.random()), reverse=True)
            formatted = "; ".join(map(strawpoll_format, enumerate(pairs)))
            announcement = utils.shorten(
                "Poll complete: %s: %s" % (results["title"], formatted), 450)
            conn.privmsg(respond_to, announcement)
    # Drop everything that just finished.
    lrrbot.polls = list(filter(lambda entry: entry[0] >= now, lrrbot.polls))
def twitch_videos():
    """
    Yield the channel's past-broadcast videos, paging through the Twitch API
    ten at a time. Fetched pages are memoized in VIDEO_CACHE keyed by offset,
    and iteration stops when a page comes back empty.
    """
    next_data = {"offset": 0, "limit": 10, "broadcasts": "true"}
    last_length = 1
    while last_length > 0:
        if next_data["offset"] in VIDEO_CACHE:
            videos = VIDEO_CACHE[next_data["offset"]]
        else:
            videos = json.loads(utils.http_request(
                "https://api.twitch.tv/kraken/channels/%s/videos" % config["channel"],
                data=next_data))["videos"]
            VIDEO_CACHE[next_data["offset"]] = videos
        # Fix: removed a dead `last_length = len(next_data)` store here — it
        # was always overwritten below and never affected the loop condition.
        for video in videos:
            yield video
        next_data["offset"] += next_data["limit"]
        last_length = len(videos)
def get_upcoming_events(calendar, after=None):
    """
    Get the next several events from the calendar.

    Will include the currently-happening events (if any) and a number of
    following events. Results are cached, so we get more events than we should
    need, so that if the first few events become irrelevant by the time the
    cache expires, we still have the data we need. (Technically, the API quota
    limits allow us to get the events, for both calendars, every 1.7
    seconds... but still, caching on principle.)

    The "after" parameter allows overriding the reference time, for testing
    purposes.
    """
    reference = after if after is not None else datetime.datetime.now(datetime.timezone.utc)
    query = {
        "maxResults": EVENT_COUNT,
        "orderBy": "startTime",
        "singleEvents": "true",
        "timeMin": reference.strftime(DATE_FORMAT),
        "timeZone": config['timezone'].zone,
        "key": config['google_key'],
    }
    payload = json.loads(
        utils.http_request(EVENTS_URL % urllib.parse.quote(calendar), query))
    if 'error' in payload:
        raise Exception(payload['error']['message'])
    # Reduce each API item to the fields the bot actually uses.
    return [
        {
            "id": item['id'],
            "url": item['htmlLink'],
            "title": item['summary'],
            "creator": item['creator']['displayName'],
            "start": dateutil.parser.parse(item['start']['dateTime']),
            "end": dateutil.parser.parse(item['end']['dateTime']),
            "location": item.get('location'),
        }
        for item in payload['items']
    ]
def twitch_videos():
    """
    Generator over the channel's past broadcasts, fetched from Twitch in
    pages of ten. Each page is cached in VIDEO_CACHE under its offset; the
    loop terminates once an empty page is returned.
    """
    next_data = {"offset": 0, "limit": 10, "broadcasts": "true"}
    last_length = 1
    while last_length > 0:
        if next_data["offset"] in VIDEO_CACHE:
            videos = VIDEO_CACHE[next_data["offset"]]
        else:
            videos = json.loads(
                utils.http_request(
                    "https://api.twitch.tv/kraken/channels/%s/videos" % config["channel"],
                    data=next_data))["videos"]
            VIDEO_CACHE[next_data["offset"]] = videos
        # Fix: dropped the dead `last_length = len(next_data)` assignment that
        # was unconditionally overwritten by `last_length = len(videos)` below.
        for video in videos:
            yield video
        next_data["offset"] += next_data["limit"]
        last_length = len(videos)
def get_game(name, all=False):
    """
    Get the game information for a particular game.

    For response object structure, see:
    https://github.com/justintv/Twitch-API/blob/master/v3_resources/search.md#example-response-1

    May throw exceptions on network/Twitch error.
    """
    params = {"query": name, "type": "suggest", "live": "false"}
    payload = json.loads(
        utils.http_request("https://api.twitch.tv/kraken/search/games", params))
    games = payload["games"]
    if all:
        return games
    # Only an exact name match counts; otherwise report no game found.
    return next((game for game in games if game["name"] == name), None)
def get_subscribers(channel=None, count=5, offset=None, latest=True):
    """
    Fetch recent subscribers for a channel (defaults to the configured one).

    Returns a list of (display_name, logo, created_at) tuples, or None when
    no OAuth token is stored for the channel.
    """
    if channel is None:
        channel = config['channel']
    oauth_store = storage.data['twitch_oauth']
    if channel not in oauth_store:
        return None
    headers = {"Authorization": "OAuth %s" % oauth_store[channel]}
    query = {
        "limit": count,
        "direction": "desc" if latest else "asc",
    }
    if offset is not None:
        query['offset'] = offset
    payload = json.loads(utils.http_request(
        "https://api.twitch.tv/kraken/channels/%s/subscriptions" % channel,
        headers=headers, data=query))
    return [
        (sub['user']['display_name'], sub['user'].get('logo'), sub['created_at'])
        for sub in payload['subscriptions']
    ]
def get_twitch_emotes_undocumented():
    """Build {emote_set: {code: {regex, html}}} from Twitch's emoticon_images endpoint."""
    # This endpoint is not documented, however `/chat/emoticons` might be
    # deprecated soon.
    emotes = json.loads(utils.http_request(
        "https://api.twitch.tv/kraken/chat/emoticon_images"))["emoticons"]
    emotesets = {}
    for emote in emotes:
        pattern = emote["code"]
        # Un-escape the HTML entities Twitch embeds in emote codes.
        for entity, char in ((r"\<\;", "<"), (r"\>\;", ">"), (r"\"\;", '"'), (r"\&\;", "&")):
            pattern = pattern.replace(entity, char)
        if re_just_words.match(pattern):
            # Word-like codes only match on word boundaries.
            pattern = r"\b%s\b" % pattern
        emotesets.setdefault(emote["emoticon_set"], {})[emote["code"]] = {
            "regex": re.compile("(%s)" % pattern),
            "html": '<img src="https://static-cdn.jtvnw.net/emoticons/v1/%s/1.0" alt="{0}" title="{0}">' % emote["id"],
        }
    return emotesets
def get_subscribers(channel=None, count=5, offset=None, latest=True):
    """
    Return up to `count` subscribers of `channel` as
    (display_name, logo, created_at) tuples, newest first by default.

    Requires a stored OAuth token for the channel; returns None without one.
    """
    channel = channel or config['channel'] if channel is not None else config['channel']
    if channel not in storage.data['twitch_oauth']:
        return None
    auth_headers = {
        "Authorization": "OAuth %s" % storage.data['twitch_oauth'][channel],
    }
    request_data = {"limit": count, "direction": "desc" if latest else "asc"}
    if offset is not None:
        request_data['offset'] = offset
    raw = utils.http_request(
        "https://api.twitch.tv/kraken/channels/%s/subscriptions" % channel,
        headers=auth_headers, data=request_data)
    subs = json.loads(raw)['subscriptions']
    result = []
    for sub in subs:
        user = sub['user']
        result.append((user['display_name'], user.get('logo'), sub['created_at']))
    return result
def register(cls, identity, password, captcha="", invite="", host=()):
    """
    Register user

    :param identity: User identity: email or mobile
    :param password: User password
    :param captcha: Captcha code
    :param invite: Invite code
    :return: Logged in <User> instance if succeed
    """
    payload = {
        "password": cls._rsa_encrypt(password),
        "confirm": "",
        "verifyCode": captcha,
        "inviteCode": invite,
        "sessionId": "",
        "token": "",
        "sig": "",
    }
    payload.update(cls._get_identity_dict(identity))
    result = http_request(cls.base_url() + "/register", json=payload).json()
    if check_code(result):
        # Registration succeeded: log in and hand back the authenticated user.
        user = cls()
        user.login(identity=identity, password=password)
        return user
    # Registration failed: map the known error code, otherwise report it raw.
    code = result["result"]
    if code == 1:
        message = "Duplicate user identity: {}".format(identity)
    else:
        message = "unknown error returned in registration: {}".format(code)
    raise RegisterExcept(message)
def get_twitch_emotes():
    """Fetch Twitch's documented emote list and build per-set {code: {regex, html}} maps."""
    emotes = json.loads(
        utils.http_request("https://api.twitch.tv/kraken/chat/emoticons"))['emoticons']
    emotesets = {}
    for emote in emotes:
        pattern = emote['regex']
        if pattern == r"\:-?[\\/]":
            # Don't match :/ inside URLs
            pattern = r"\:-?[\\/](?![\\/])"
        # Un-escape the HTML entities Twitch embeds in emote patterns.
        for entity, char in ((r"\<\;", "<"), (r"\>\;", ">"), (r"\"\;", '"'), (r"\&\;", "&")):
            pattern = pattern.replace(entity, char)
        if re_just_words.match(pattern):
            pattern = r"\b%s\b" % pattern
        compiled = re.compile("(%s)" % pattern)
        for image in emote['images']:
            html = '<img src="%s" width="%d" height="%d" alt="{0}" title="{0}">' % (
                image['url'], image['width'], image['height'])
            emotesets.setdefault(image.get("emoticon_set"), {})[emote['regex']] = {
                "regex": compiled,
                "html": html,
            }
    return emotesets
def get_game(name, all=False):
    """
    Get the game information for a particular game.

    For response object structure, see:
    https://github.com/justintv/Twitch-API/blob/master/v3_resources/search.md#example-response-1

    May throw exceptions on network/Twitch error.
    """
    search_opts = {
        'query': name,
        'type': 'suggest',
        'live': 'false',
    }
    raw = utils.http_request("https://api.twitch.tv/kraken/search/games", search_opts)
    results = json.loads(raw)
    if all:
        return results['games']
    # Otherwise hunt for an exact name match, falling through to None.
    for candidate in results['games']:
        if candidate['name'] == name:
            return candidate
    return None
def download_csv(self, **kwargs):
    """
    :param start_date: Retrieve data rows on and after the specified start date.
    :param end_date: Retrieve data rows up to and including the specified end date.
    :param dataset_code: Code identifying the dataset.
    :param collapse: Options are daily, weekly, monthly, quarterly, annual
    :param limit: Use limit=n to get the first n rows of the dataset. Use limit=1 to get just the latest row.
    :param order: Return data in ascending or descending order of date. Default is desc.
    :param transform: Perform elementary calculations on the data prior to downloading. Default is none. Options are diff, rdiff, cumul, and normalize
    :return: data in pandas dataframe format
    """
    # NOTE(review): urllib.urlencode is the Python 2 API; Python 3 moved it to
    # urllib.parse.urlencode — confirm the target interpreter version.
    kwargs.update({'api_key': QUANDL_API_KEY})
    params = urllib.urlencode(kwargs)
    # URL2 is the dataset-specific fallback endpoint passed to http_request.
    URL2 = QUANDL_URL2.format(database_code=kwargs.get('database_code'),
                              dataset_code=kwargs.get('dataset_code'))
    status, data = http_request(url=QUANDL_URL, params=params, ishttps=True, url2=URL2)
    return status, data
def main():
    """Copy staged highlights into the highlights spreadsheet once the stream ends."""
    # Never modify staged highlights mid-stream.
    if twitch.get_info()["live"]:
        print("Stream is live.")
        return
    pending = send_bot_command("get_data", {"key": "staged_highlights"})
    if argv.test:
        print("Staged highlights: %r" % pending)
    pending = pending if pending is not None else []
    # Look each entry up in the Twitch archive, discarding unresolved ones.
    entries = []
    for item in pending:
        looked_up = twitch_lookup(item)
        if looked_up is not None:
            entries.append(looked_up)
    if not entries:
        return

    token = get_oauth_token(["https://spreadsheets.google.com/feeds"])
    headers = {"Authorization": "%(token_type)s %(access_token)s" % token}
    worksheets_url = (
        "https://spreadsheets.google.com/feeds/worksheets/%s/private/full" % SPREADSHEET)
    feed = xml.dom.minidom.parseString(
        utils.http_request(worksheets_url, headers=headers))
    first_sheet = next(iter(feed.getElementsByTagName("entry")))
    list_feed_url = find_schema(
        first_sheet, "http://schemas.google.com/spreadsheets/2006#listfeed")
    if list_feed_url is None:
        print("List feed missing.")
        return
    list_feed = xml.dom.minidom.parseString(
        utils.http_request(list_feed_url, headers=headers))
    post_url = find_schema(list_feed, "http://schemas.google.com/g/2005#post")
    if post_url is None:
        print("POST URL missing.")
        return

    for entry in entries:
        # One Atom <entry> per highlight; gsx: fields match spreadsheet columns.
        doc = xml.dom.minidom.getDOMImplementation().createDocument(None, "entry", None)
        root = doc.documentElement
        root.setAttribute("xmlns", "http://www.w3.org/2005/Atom")
        root.setAttribute(
            "xmlns:gsx", "http://schemas.google.com/spreadsheets/2006/extended")
        root.appendChild(new_field(doc, "SHOW", entry["title"]))
        root.appendChild(new_field(doc, "QUOTE or MOMENT", entry["description"]))
        root.appendChild(new_field(doc, "YOUTUBE VIDEO LINK", entry["url"]))
        root.appendChild(new_field(
            doc, "ROUGH TIME THEREIN",
            "before " + utils.nice_duration(entry["time"], 0)))
        root.appendChild(new_field(doc, "NOTES", "From chat user '%s'." % entry["user"]))
        if argv.test:
            print("Command: %s" % doc.toxml())
        else:
            headers["Content-Type"] = "application/atom+xml"
            utils.http_request(post_url, headers=headers, data=doc.toxml(), method="POST")

    if not argv.test:
        send_bot_command("set_data", {"key": "staged_highlights", "value": []})