def index() -> Results:
    from . import hpi
    from my.reddit import submissions, comments, saved, upvoted

    # TODO should probably use markdown parser here?
    logger.info('processing saves')
    for s in saved():
        try:
            yield from _from_save(s)
        except Exception as e:
            yield e

    logger.info('processing comments')
    for c in comments():
        try:
            yield from _from_comment(c)
        except Exception as e:
            yield e

    logger.info('processing submissions')
    for sub in submissions():
        try:
            yield from _from_submission(sub)
        except Exception as e:
            yield e

    logger.info('processing upvotes')
    for u in upvoted():
        try:
            yield from _from_upvote(u)
        except Exception as e:
            yield e

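# For context: a minimal sketch of the scaffolding index() above assumes. The names
# below are assumptions modelled on the snippet itself (it yields both visits and
# exceptions via `yield e`), not the actual promnesia definitions.
import logging
from typing import Iterator, Union

logger = logging.getLogger(__name__)

# the result stream mixes successful items with exceptions; `object` stands in
# for the real visit type, which this sketch deliberately doesn't pin down
Results = Iterator[Union[object, Exception]]
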
def test_basic() -> None:
    # todo maybe this should call stat or something instead?
    # that would also ensure a reasonable stat implementation, and less duplication
    # note: deliberately use the old module (instead of my.reddit.all) to test backwards compatibility
    from my.reddit import saved, events

    assert len(list(events())) > 0
    assert len(list(saved())) > 0

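# The TODO in test_basic() suggests going through HPI's stat helper instead; a hedged
# sketch of what that might look like, assuming the usual my.core.stat convention of
# returning a dict of counts per provider:
def stats():
    from my.core import stat
    from my.reddit import saved, events
    # stat(fn) consumes the provider and reports how many items it yielded;
    # merging the dicts covers both providers in one stats() entry point
    return {**stat(saved), **stat(events)}
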
def test_saves() -> None:
    from my.reddit.all import saved
    saves = list(saved())
    assert len(saves) > 0

    # just check that they are unique (make_dict will throw otherwise)
    from my.core.common import make_dict
    make_dict(saves, key=lambda s: s.sid)

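# Why make_dict doubles as a uniqueness check: it raises when two items map to the
# same key, so merely building the dict asserts that all sids are distinct.
# A self-contained illustration (a plain dict comprehension shown for contrast):
items = ['a', 'b', 'a']
plain = {x: x for x in items}        # silently keeps the last duplicate
assert len(plain) == 2               # duplicate 'a' was swallowed
# make_dict(items, key=lambda x: x)  # would raise instead of swallowing
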
def get_items(self):
    for s in saved():
        yield s.sid, node(
            # heading needs to be lazy because of the is_alive check
            # (mypy can't infer the lambda's type here, hence the ignore)
            heading=lambda s=s: dt_heading(  # type: ignore[misc]
                s.created,
                ('[#A] *DEAD*' if self.is_dead_url(s.url) else '')
                + link(title=s.title, url=s.url)
                + f' /r/{s.subreddit}',
            ),
            body=s.text,
        )

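# The `s=s` default in get_items() guards against Python's late-binding closures:
# without it, every lazy heading would see the *last* save from the loop.
# A minimal self-contained illustration of the pitfall:
fns = [lambda: i for i in range(3)]
assert [f() for f in fns] == [2, 2, 2]    # all closures read the final i
fns = [lambda i=i: i for i in range(3)]
assert [f() for f in fns] == [0, 1, 2]    # defaults capture i at definition time
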
def index(*, render_markdown: bool = False, renderer: Optional['RedditRenderer'] = None) -> Results:
    from . import hpi
    from my.reddit import submissions, comments, saved, upvoted

    if renderer is not None:
        assert callable(renderer), f"{renderer} is not a callable (should be a subclass of RedditRenderer)"
        r = renderer(render_markdown=render_markdown)
    else:
        r = RedditRenderer(render_markdown=render_markdown)

    logger.info('processing saves')
    for s in saved():
        try:
            yield from r._from_save(s)
        except Exception as e:
            yield e

    logger.info('processing comments')
    for c in comments():
        try:
            yield from r._from_comment(c)
        except Exception as e:
            yield e

    logger.info('processing submissions')
    for sub in submissions():
        try:
            yield from r._from_submission(sub)
        except Exception as e:
            yield e

    logger.info('processing upvotes')
    for u in upvoted():
        try:
            yield from r._from_upvote(u)
        except Exception as e:
            yield e

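# A hedged sketch of plugging in a custom renderer: index() above calls the class
# with render_markdown and then dispatches to its _from_* methods, so a subclass can
# override any of them. The comment-length policy below is purely illustrative, and
# the `text` attribute is an assumption here, not a confirmed field name.
class SkipShortComments(RedditRenderer):
    def _from_comment(self, c):
        if len(getattr(c, 'text', '') or '') < 10:
            return  # drop very short comments (hypothetical policy)
        yield from super()._from_comment(c)

# usage: results = list(index(render_markdown=True, renderer=SkipShortComments))
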
def test() -> None:
    from my.reddit import events, inputs, saved
    list(events())
    list(saved())

def test_saves() -> None:
    from my.reddit import events, inputs, saved
    # TODO not sure if this is necessary anymore?
    saves = list(saved())
    # just check that they are unique (make_dict will throw otherwise)
    make_dict(saves, key=lambda s: s.sid)

def test_reddit_has_data() -> None:
    assert ilen(events(parallel=False)) > 10
    assert ilen(saved()) > 10
    assert ilen(inputs()) >= 1
    assert ilen(pushshift_comments()) > ilen(_dal().comments())

def test_saves() -> None:
    # TODO not sure if this is necessary anymore?
    saves = list(saved())
    # just check that they are unique (make_dict will throw otherwise)
    assert len(make_dict(saves, key=lambda s: s.sid)) > 10

def test() -> None:
    list(events())
    list(saved())