def _digest_episode(feed_episode): debug("Digesting episode") episode_link = feed_episode.a['href'] # Get data num_match = _episode_re.match(episode_link) num_match_alter = _episode_re_alter.match(episode_link) if num_match: num = int(num_match.group(1)) elif num_match_alter: warning("Using alternate episode key format") num = int(num_match_alter.group(1)) else: warning("Unknown episode number format") return None if num <= 0: return None name = feed_episode.h3.text name_match = _episode_name_correct.match(name) if name_match: debug(f" Corrected title from {name}") name = name_match.group(1) if _episode_name_invalid.match(name): warning(f" Episode title not found") name = None link = episode_link date = datetime.utcnow() # Not included in stream ! return Episode(num, name, link, date)
def _digest_episode(feed_episode): debug("Digesting episode") # Get data num_match = _episode_count_fix.match( feed_episode.crunchyroll_episodenumber) if num_match: num = int(num_match.group(1)) else: warning("Unknown episode number format \"{}\"".format( feed_episode.crunchyroll_episodenumber)) num = 0 debug(" num={}".format(num)) name = feed_episode.title match = _episode_name_correct.match(name) if match: debug(" Corrected title from \"{}\"".format(name)) name = match.group(1) debug(" name={}".format(name)) link = feed_episode.link debug(" link={}".format(link)) date = feed_episode.published_parsed debug(" date={}".format(date)) return Episode(num, name, link, date)
def _digest_episode(feed_episode): title = feed_episode["title"] episode_num = _extract_episode_num(title) if episode_num is not None: date = feed_episode["published_parsed"] or datetime.utcnow() link = feed_episode["id"] return Episode(episode_num, None, link, date) return None
def _digest_episode(feed_episode): title = feed_episode["title"] episode_num = _extract_episode_num(title) if episode_num: # Intended, checks if not None and > 0 date = feed_episode["published_parsed"] link = feed_episode["id"] return Episode(episode_num, None, link, date) return None
def _digest_episode(feed_episode): title = feed_episode["title"] debug("Extracting episode number from \"{}\"".format(title)) episode_num = _extract_episode_num(title) if episode_num is not None: debug(" Match found, num={}".format(episode_num)) date = feed_episode["published_parsed"] or datetime.utcnow() link = feed_episode["id"] return Episode(episode_num, None, link, date) debug(" No match found") return None
def _digest_episode(feed_episode): debug("Digesting episode") name = feed_episode.find("h4", itemprop="name", class_="episode__title").text link = feed_episode.find("a", itemprop="url", class_="episode__link").href num = int(feed_episode.find("meta", itemprop="episodeNumber")["content"]) date_string = feed_episode.find("meta", itemprop="dateCreated")["content"] date = datetime.fromordinal(dateutil.parser.parse(date_string).toordinal()) return Episode(num, name, link, date)
def get_all_episodes(self, stream, **kwargs):
    info("Getting episodes for Daisuki/{}".format(stream.show_id))

    episodes = []
    # NOTE: url is not defined in this snippet; it is presumably built elsewhere
    # from the stream's show key before the request is made.
    response = self.request(url, xml=True, **kwargs)
    for movieset in response.findall("movieset"):
        for items in movieset.findall("items"):
            for item in items.findall("item"):
                episodes.append(Episode(
                    item.get("chapter"),  # Element.get reads the XML attribute (a string)
                    None,
                    _episode_url.format(show=stream.show_key, ep=item.get("productid")),
                    datetime.utcnow()))
    return episodes
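# A sketch of the XML shape the nested loops above assume, parsed with ElementTree;
# the element and attribute names mirror the calls above, while the values are invented.
import xml.etree.ElementTree as ET

_sample_xml = """
<response>
    <movieset>
        <items>
            <item chapter="1" productid="abc123"/>
            <item chapter="2" productid="def456"/>
        </items>
    </movieset>
</response>
"""
response = ET.fromstring(_sample_xml)
# findall("movieset") / findall("items") / findall("item") walk this structure, and
# item.get("chapter") / item.get("productid") read the attributes of each <item>.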
def _digest_episode(self, feed_episode, stream): debug("Digesting episode") # Get data num = feed_episode["number"] debug(" num={}".format(num)) name = feed_episode["show_name"] debug(" name={}".format(name)) link = self._episode_url.format(show_slug=stream.show_key, ep_slug=feed_episode["url"]) debug(" link={}".format(link)) date = datetime.strptime(feed_episode["releaseDate"], "%Y/%m/%d") debug(" date={}".format(date)) return Episode(num, name, link, date)
def _digest_episode(feed_episode):
    _video_url = "https://www.youtube.com/watch?v={video_id}"
    snippet = feed_episode["snippet"]
    title = snippet["title"]
    episode_num = _extract_episode_num(title)
    if episode_num is None or not 0 < episode_num < 720:
        return None
    date_string = snippet["publishedAt"].replace('Z', '')
    #date_string = snippet["publishedAt"].replace('Z', '+00:00')  # Use this for offset-aware dates
    # fromisoformat raises ValueError rather than returning a falsy value, so an
    # "or" fallback would never fire; catch the exception to fall back instead.
    try:
        date = datetime.fromisoformat(date_string)
    except ValueError:
        date = datetime.utcnow()
    link = _video_url.format(video_id=snippet["resourceId"]["videoId"])
    return Episode(episode_num, None, link, date)
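# A sketch of where the items digested above might come from, assuming the YouTube
# Data API v3 playlistItems endpoint; the playlist id, API key, and helper name are
# placeholders, not values from the source.
import requests

_playlist_items_url = "https://www.googleapis.com/youtube/v3/playlistItems"

def _get_feed_episodes(playlist_id, api_key):
    params = {"part": "snippet", "playlistId": playlist_id, "maxResults": 50, "key": api_key}
    response = requests.get(_playlist_items_url, params=params, timeout=30)
    response.raise_for_status()
    # Each returned item has a "snippet" with title, publishedAt, and resourceId.videoId.
    return [_digest_episode(item) for item in response.json().get("items", [])]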
def _digest_episode(self, feed_episode, stream): debug("Digesting episode") # Get data content = feed_episode.find("content").find("metadata") num_text = content.find("recentContentItem").text num_match = self._re_episode_num.match(num_text) if not num_match: error("recentContentItem episode has unknown format: \"{}\"".format(num_text)) num = int(num_match.group(1)) debug(" num={}".format(num)) name = None #feed_episode["show_name"] #FIXME debug(" name={}".format(name)) link = None #self._episode_url.format(show_slug=stream.show_key, ep_slug=feed_episode["url"]) #FIXME debug(" link={}".format(link)) #FIXME: content-metadata contains "<recentlyAdded>added {1458071999} ago"; could use timestamp date = datetime.now() #datetime.strptime(feed_episode["releaseDate"], "%Y/%m/%d") debug(" date={}".format(date)) return Episode(num, name, link, date)