Example No. 1
0
def main(do_download):
    """Build an RSS feed from recent Netflix streaming activity.

    Downloads the viewing-activity CSV (or reuses the on-disk cache),
    parses it into Show records, and writes the 20 most recent shows
    out to the feed.

    Args:
        do_download: When True, fetch fresh activity and refresh the
            cached CSV next to this script; otherwise read the cache.
    """
    start_time = time.time()
    cfg = cfgreader.CfgReader(__file__.replace('.py', '.cfg'))
    cache = __file__.replace('.py', '.csv')
    if do_download:
        csv_data = download(cfg)
        logging.debug("Downloaded latest streaming activity.")
        with open(cache, "w", encoding="utf-8") as f:
            f.write(csv_data)
    else:
        logging.debug("Using cached streaming activity.")

    shows = []
    # newline='' is required by the csv module; read with the same
    # encoding the cache was written with.
    with open(cache, newline='', encoding="utf-8") as f:
        reader = csv.reader(f)
        t, d = next(reader)
        # Validate the header explicitly — assert is stripped under -O.
        if (t, d) != ('Title', 'Date'):
            raise ValueError(f"Unexpected CSV header: {t!r}, {d!r}")
        # Synthesize a unique guid per row, counting up from 1000 as before.
        for guid, (title, show_date) in enumerate(reader, start=1000):
            url = 'https://www.netflix.com/search?q=' + urllib.parse.quote(title)
            shows.append(Show(title, url, title + str(guid), show_date))
    shows.sort(reverse=True)
    update_status = write_feed(shows[:20], cfg)
    logging.info(f"{time.time() - start_time:2.0f}s {update_status}")
Example No. 2
0
def main(do_download: bool) -> None:
    """Build an RSS feed from recent Overcast podcast-episode activity.

    Downloads the latest OPML export (rate-limited, or reuses the on-disk
    cache), extracts episode entries, and writes the most recent ones to
    the feed.

    Args:
        do_download: When True and not rate limited, fetch fresh activity
            and refresh the cached OPML; otherwise parse the cache.
    """
    start_time = time.time()
    cfg = cfgreader.CfgReader(__file__.replace('.py', '.cfg'))
    cache = __file__.replace('.py', '.opml')
    if do_download and not rate_limited(cache):
        opml = download(cfg)
        logging.debug("Downloaded latest episode activity.")
        with open(cache, "w", encoding="utf-8") as f:
            f.write(opml)
        root = ET.fromstring(bytes(opml, encoding="utf-8"))
    else:
        logging.debug("Using cached episode activity.")
        root = ET.parse(cache)

    episodes: List[Episode] = []
    for rss in root.findall('.//outline[@type="rss"]'):
        rss_title = rss.attrib['title']
        for ep in rss.findall('outline[@type="podcast-episode"]'):
            if add_episode(ep):
                episodes.append(Episode(rss_title, ep.attrib['title'], ep.attrib['url'],
                    ep.attrib['overcastUrl'], ep.attrib['userUpdatedDate'],
                    'progress' in ep.attrib))
    episodes.sort(reverse=True)

    # I'm seeing too many duplicate partial posts. Experiment: Try not listing
    # the most recent episode if it has only been partially heard.
    # It'll likely get listed later.
    # Guard against an empty list — indexing [0] unconditionally raised
    # IndexError when no episodes qualified.
    if episodes and episodes[0].partial:
        episodes.pop(0)

    episodes = reconcile_with_feed(episodes[:20], cfg.feed.href)

    update_status = write_feed(episodes, cfg)
    logging.info(f"{time.time() - start_time:2.0f}s {update_status}")
Example No. 3
0
import sys
import os
import time
import traceback
from argparse import ArgumentParser
import imaplib
import email
from email.header import decode_header
import hashlib
import cfgreader
import logging
import html
from typing import Callable, List, Tuple

# Read in custom configurations from the .cfg file that sits next to
# this script (e.g. foo.py -> foo.cfg).
g_cfg = cfgreader.CfgReader(__file__.replace('.py', '.cfg'))

# These two strings will form the header and individual
# items of the RSS feed.
# NOTE: %%s is a deliberately escaped placeholder — after this
# %-substitution it becomes a plain %s so the pubDate can be filled in
# later, when the feed is actually generated.
feed_header = """<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
<channel>
<title>Emails for %s</title>
<link>%s</link>
<atom:link href="http://%s/%s.xml" rel="self" type="application/rss+xml" />
<pubDate>%%s</pubDate>
<description>Feed automatically generated by %s's %s</description>
<language>en-us</language>
""" % (g_cfg.main.name, g_cfg.imap.webmail, g_cfg.main.url_base,
       g_cfg.main.rss_base, g_cfg.main.url_base, os.path.basename(__file__))