def find_thread_by_board_thread_refno_with_posts(
        board: BoardModel, thread_refno: int) -> Optional[ThreadModel]:
    """Fetch a thread with its board and posts, preferring the memcache copy.

    On a cache miss the thread is loaded from the database, written to the
    cache without a timeout, and returned. Returns None when the thread does
    not exist or has no posts.
    """
    cached = cache.get(cache_key('thread', board.name, thread_refno))
    if cached:
        return ThreadModel.from_cache(cached)

    with session() as s:
        query = (s.query(ThreadOrmModel)
                 .options(lazyload('posts'))
                 .filter(ThreadOrmModel.refno == thread_refno,
                         ThreadOrmModel.board_id == BoardOrmModel.id,
                         BoardOrmModel.name == board.name))
        orm_thread = query.one_or_none()
        if not orm_thread or not orm_thread.posts:
            return None

        # TODO: also load board in the query above
        thread = ThreadModel.from_orm_model(
            orm_thread, include_board=True, include_posts=True)
        cache.set(cache_key('thread', thread.board.name, thread.refno),
                  thread.to_cache(include_board=True, include_posts=True),
                  timeout=0)
        return thread
def is_verified(verifying_client: VerifyingClient) -> bool:
    """Return True when the client has a valid, unexpired verification.

    Looks up the verification in memcache first, then the database; a database
    hit is written back to the cache with a timeout matching its expiry.
    """
    verification_model = None
    verification_model_cache = cache.get(
        cache_key('verifications', verifying_client.verification_id))
    if verification_model_cache:
        verification_model = VerificationsModel.from_cache(
            verification_model_cache)

    if not verification_model:
        with session() as s:
            q = s.query(VerificationOrmModel)
            q = q.filter_by(verification_id=verifying_client.verification_id)
            verifications_orm_model = q.one_or_none()
            if verifications_orm_model:
                verification_model = VerificationsModel.from_orm_model(
                    verifications_orm_model)
                cached = verification_model.to_cache()
                # expires is in milliseconds; cache timeouts are in seconds,
                # with a minimum of 1 so the entry is never set as "no expiry".
                timeout = max(1, (verification_model.expires - now()) // 1000)
                cache.set(cache_key('verifications', verification_model.id),
                          cached, timeout=timeout)
            s.commit()

    # Fix: the previous `model and _is_verifications_valid(...)` returned None
    # (not False) when no verification exists, violating the -> bool contract.
    return verification_model is not None and _is_verifications_valid(
        verifying_client, verification_model)
def find_by_type(page_type: str) -> 'List[PageModel]':
    """Return all pages of the given type, ordered by their configured order.

    Lookup order: process-local cache, then memcache, then the database.
    Local-cache hits are returned as copies so callers cannot mutate the
    cached instances.
    """
    _check_page_type(page_type)

    local = local_cache.get(cache_key('type', page_type))
    if local:
        return [page.copy() for page in local]

    cached = cache.get(cache_key('pages_by_type', page_type))
    if cached is not None:
        pages = [PageModel.from_cache(item) for item in cached]
    else:
        with session() as s:
            query = (s.query(PageOrmModel)
                     .filter_by(type=page_type)
                     .order_by(asc(PageOrmModel.order)))
            pages = [PageModel.from_orm_model(row) for row in query.all()]
            cache.set(cache_key('pages_by_type', page_type),
                      [page.to_cache() for page in pages])
            s.commit()

    local_cache.set(cache_key('type', page_type), pages)
    return pages
def _invalidate_thread_cache(s: Session, old_thread: ThreadModel,
                             board: BoardModel):
    """
    Update the memcache version of the specified thread.
    This will update the thread cache, and the thread stub cache.

    Returns a (thread, thread_stub) tuple, or (None, None) when the thread no
    longer exists in the database (both cache entries are deleted then).
    """
    key = cache_key('thread', board.name, old_thread.refno)
    stub_key = cache_key('thread_stub', board.name, old_thread.refno)

    # Reuse the parsed html from the old cache.
    old_thread_posts_cache = cache.get(key)
    old_thread_posts = None
    if old_thread_posts_cache:
        old_thread_posts = ThreadModel.from_cache(old_thread_posts_cache).posts

    # Next, query all the new posts
    q = s.query(ThreadOrmModel)
    q = q.filter_by(id=old_thread.id)
    q = q.options(lazyload('posts'))
    res = q.one_or_none()
    if not res:
        cache.delete(key)
        cache.delete(stub_key)
        # Fix: previously returned bare None here, which raised a TypeError in
        # callers that unpack the result as `thread, thread_stub = ...`.
        return None, None

    thread = ThreadModel.from_orm_model(res,
                                        include_board=True,
                                        include_posts=True,
                                        cached_thread_posts=old_thread_posts)
    thread_cache = thread.to_cache(include_board=True, include_posts=True)
    cache.set(key, thread_cache, timeout=0)

    thread_stub = ThreadStubModel.from_thread(thread, include_snippets=True)
    thread_stub_cache = thread_stub.to_cache()
    cache.set(stub_key, thread_stub_cache, timeout=0)

    return thread, thread_stub
def get_catalog(board: BoardModel) -> CatalogModel:
    """Return the catalog for a board, rebuilding the board caches on a miss."""
    cached = cache.get(cache_key('board', board.name))
    if cached:
        return CatalogModel.from_cache(cached)

    with session() as s:
        catalog, _board_pages = _invalidate_board_pages_catalog_cache(s, board)
    return catalog
def get_board_page(board: BoardModel, page: int) -> BoardPageModel:
    """Return one page of a board index, rebuilding the board caches on a miss."""
    cached = cache.get(cache_key('board', board.name, page))
    if cached:
        return BoardPageModel.from_cache(cached)

    with session() as s:
        _catalog, board_pages = _invalidate_board_pages_catalog_cache(s, board)
    return board_pages[page]
def get_catalog(board: BoardModel) -> CatalogModel:
    """Return the catalog model for a board.

    Serves the memcache copy when present; otherwise the whole board cache
    (catalog plus pages) is regenerated and the fresh catalog returned.
    """
    catalog_cache = cache.get(cache_key('board', board.name))
    if catalog_cache:
        return CatalogModel.from_cache(catalog_cache)

    with session() as s:
        rebuilt_catalog, _pages = _invalidate_board_pages_catalog_cache(
            s, board)
        return rebuilt_catalog
def get_board_page(board: BoardModel, page: int) -> BoardPageModel:
    """Return a single board page model.

    Serves the memcache copy when present; otherwise the whole board cache
    (catalog plus pages) is regenerated and the requested page returned.
    """
    board_page_cache = cache.get(cache_key('board', board.name, page))
    if board_page_cache:
        return BoardPageModel.from_cache(board_page_cache)

    with session() as s:
        _cat, rebuilt_pages = _invalidate_board_pages_catalog_cache(
            s, board)
        return rebuilt_pages[page]
def _invalidate_board_pages_catalog_cache(s: Session, board: BoardModel):
    """
    Update the memcache version of the specified board.
    This will update the board pages from the already cached thread stubs,
    and create a new catalog cache.

    Returns a (catalog, board_pages) tuple.
    """
    q = s.query(ThreadOrmModel)
    q = q.filter(ThreadOrmModel.board_id == board.id)
    threads_orm = q.all()
    thread_models = [ThreadModel.from_orm_model(t) for t in threads_orm]

    # This builds the board index, stickies first, oldest first, then normal
    # posts, newest first. The pages are split according to the board config,
    # and the catalog is built from only the ops.
    stickies = []
    threads = []
    for thread in thread_models:
        thread_stub_cache = cache.get(
            cache_key('thread_stub', board.name, thread.refno))
        if thread_stub_cache:
            thread_stub = ThreadStubModel.from_cache(thread_stub_cache)
        else:
            # The board and thread selects are done separately and there is
            # thus the possibility that the thread was removed after the board
            # select. Fix: guard against _invalidate_thread_cache returning
            # None instead of a tuple in that case, which previously crashed
            # the unpacking here with a TypeError.
            result = _invalidate_thread_cache(s, thread, board)
            thread_stub = result[1] if result else None
            if thread_stub is None:
                continue

        # Fix: replaced the expression-statement conditional
        # `a.append(x) if cond else b.append(x)` with a plain if/else.
        if thread_stub.sticky:
            stickies.append(thread_stub)
        else:
            threads.append(thread_stub)

    stickies.sort(key=lambda t: t.last_modified)
    threads.sort(key=lambda t: t.last_modified, reverse=True)
    all_thread_stubs = stickies + threads

    # The catalog is a CatalogModel with ThreadStubs with only OP's
    catalog = CatalogModel.from_board_thread_stubs(board, all_thread_stubs)
    cache.set(cache_key('board', board.name), catalog.to_cache(), timeout=0)

    # All threads with stubs, divided per page
    # note: there is the possibility that concurrent processes updating this
    # cache mess up the order / create duplicates of the page threads.
    # This chance is however very low, and has no ill side effects except for
    # a visual glitch.
    board_pages = []
    for i in range(board.config.pages):
        from_index = i * board.config.per_page
        to_index = (i + 1) * board.config.per_page
        board_page = BoardPageModel.from_page_thread_stubs(
            i, all_thread_stubs[from_index:to_index])
        board_pages.append(board_page)
        cache.set(cache_key('board', board.name, i),
                  board_page.to_cache(), timeout=0)

    return catalog, board_pages
def find_by_name(name: str) -> Optional[BoardModel]:
    """Find a board (with its config) by name.

    Raises ArgumentError when the name fails validation; returns None when no
    such board exists. Database hits are written to the memcache.
    """
    if not validation.check_board_name_validity(name):
        raise ArgumentError(MESSAGE_INVALID_NAME)

    cached = cache.get(cache_key('board_and_config', name))
    if cached:
        return BoardModel.from_cache(cached)

    with session() as s:
        orm_board = (s.query(BoardOrmModel)
                     .filter_by(name=name)
                     .options(joinedload('config'))
                     .one_or_none())
        if not orm_board:
            return None
        board = BoardModel.from_orm_model(orm_board, include_config=True)
        cache.set(cache_key('board_and_config', name), board.to_cache())
        return board
def find_thread_by_board_name_thread_refno(board_name: str, thread_refno: int) -> Optional[ThreadModel]:
    """Find a thread (with its board, no posts) by board name and refno.

    A cached copy is served when present; a database result is returned
    without being written back to the cache.
    """
    cached = cache.get(cache_key('thread', board_name, thread_refno))
    if cached:
        return ThreadModel.from_cache(cached)

    with session() as s:
        orm_thread = (s.query(ThreadOrmModel)
                      .filter(ThreadOrmModel.refno == thread_refno,
                              ThreadOrmModel.board_id == BoardOrmModel.id,
                              BoardOrmModel.name == board_name)
                      .one_or_none())
        if not orm_thread:
            return None
        # TODO: also load board in the query above
        return ThreadModel.from_orm_model(orm_thread, include_board=True)
def get_site() -> SiteConfigModel:
    """Return the site configuration.

    Lookup order: process-local cache (returned as a copy), memcache, then the
    database; when no database row of type 'site' exists, defaults are used.
    Both caches are refreshed on the way out.
    """
    local = local_site_config_cache.get('site_config')
    if local:
        return local.copy()

    cached = cache.get(cache_key('config_site'))
    if cached:
        site_config = SiteConfigModel.from_cache(cached)
    else:
        with session() as s:
            orm_config = s.query(ConfigOrmModel).filter_by(
                type='site').one_or_none()
            if orm_config:
                site_config = SiteConfigModel.from_orm_model(orm_config)
            else:
                site_config = SiteConfigModel.from_defaults()
            s.commit()
        cache.set(cache_key('config_site'), site_config.to_cache())

    local_site_config_cache.set('site_config', site_config)
    return site_config
def is_verified(verifying_client: VerifyingClient) -> bool:
    """Return True when the client has a valid, unexpired verification.

    The verification is looked up in memcache first, then the database; a
    database hit is cached with a timeout derived from its expiry time.
    """
    verification_model = None
    verification_model_cache = cache.get(cache_key('verifications', verifying_client.verification_id))
    if verification_model_cache:
        verification_model = VerificationsModel.from_cache(verification_model_cache)

    if not verification_model:
        with session() as s:
            q = s.query(VerificationOrmModel)
            q = q.filter_by(verification_id=verifying_client.verification_id)
            verifications_orm_model = q.one_or_none()
            if verifications_orm_model:
                verification_model = VerificationsModel.from_orm_model(verifications_orm_model)
                cached = verification_model.to_cache()
                # expires is in milliseconds; the cache timeout is in seconds,
                # clamped to at least 1 so it never means "never expires".
                timeout = max(1, (verification_model.expires - now()) // 1000)
                cache.set(cache_key('verifications', verification_model.id), cached, timeout=timeout)
            s.commit()

    # Fix: `model and _is_verifications_valid(...)` returned None rather than
    # False when no model was found, breaking the declared -> bool contract.
    return verification_model is not None and _is_verifications_valid(verifying_client, verification_model)
def find_by_names(names: List[str]) -> List[BoardModel]:
    """unknown names are ignored!"""
    # Validate every name up front so a bad name fails before any lookups.
    for name in names:
        if not validation.check_board_name_validity(name):
            raise ArgumentError(MESSAGE_INVALID_NAME)

    results = []
    with session() as s:
        for name in names:
            cached = cache.get(cache_key('board_and_config', name))
            if cached:
                results.append(BoardModel.from_cache(cached))
                continue
            orm_board = s.query(BoardOrmModel).filter_by(name=name).one_or_none()
            if orm_board:
                board = BoardModel.from_orm_model(orm_board, include_config=True)
                cache.set(cache_key('board_and_config', name), board.to_cache())
                results.append(board)
    return results
def get_all_board_names() -> List[str]:
    """Return the names of all boards, ordered by name.

    Lookup order: process-local cache, memcache, then a name-only database
    query. Both caches are refreshed after a database read.
    """
    local = local_cache.get('all_board_names')
    if local:
        return local

    cached = cache.get(cache_key('all_board_names'))
    if cached is not None:
        # No mapping needed: this is already a plain list of strings.
        names = cached
    else:
        with session() as s:
            query = (s.query(BoardOrmModel)
                     .options(load_only('name'))
                     .order_by(BoardOrmModel.name))
            names = [row.name for row in query.all()]
            s.commit()
        cache.set(cache_key('all_board_names'), names)

    local_cache.set('all_board_names', names)
    return names
def find_by_link_name(link_name: str) -> Optional[PageModel]:
    """Find a page by its link name.

    Checks the process-local cache (returned as a copy), then memcache, then
    the database. Returns None when no page has the given link name.
    Fix: the return annotation is widened to Optional[PageModel] to match the
    None return on a miss.
    """
    lc = local_cache.get(cache_key('link_name', link_name))
    if lc:
        return lc.copy()

    page_cached = cache.get(cache_key('page_by_link_name', link_name))
    if page_cached:
        return PageModel.from_cache(page_cached)

    with session() as s:
        m = s.query(PageOrmModel).filter_by(link_name=link_name).one_or_none()
        res = None
        if m:
            res = PageModel.from_orm_model(m)
            cache.set(cache_key('page_by_link_name', res.link_name),
                      res.to_cache())
    if res:
        local_cache.set(cache_key('link_name', link_name), res)
    return res
def find_thread_by_board_name_thread_refno(
        board_name: str, thread_refno: int) -> Optional[ThreadModel]:
    """Look up a thread by board name and thread refno, without its posts.

    Returns the cached model when one exists; otherwise queries the database
    and returns the result directly (no cache write). Returns None when the
    thread is not found.
    """
    cached_thread = cache.get(cache_key('thread', board_name, thread_refno))
    if cached_thread:
        return ThreadModel.from_cache(cached_thread)

    with session() as s:
        query = s.query(ThreadOrmModel).filter(
            ThreadOrmModel.refno == thread_refno,
            ThreadOrmModel.board_id == BoardOrmModel.id,
            BoardOrmModel.name == board_name)
        row = query.one_or_none()
        if row is None:
            return None
        # TODO: also load board in the query above
        return ThreadModel.from_orm_model(row, include_board=True)
def find_by_link_name(link_name: str) -> Optional[PageModel]:
    """Find a page by its link name.

    Lookup order: process-local cache (copy returned), memcache, database.
    A database hit refreshes both caches; a miss returns None.
    Fix: return annotation widened to Optional[PageModel] — the function
    returns None when the page does not exist.
    """
    lc = local_cache.get(cache_key('link_name', link_name))
    if lc:
        return lc.copy()

    page_cached = cache.get(cache_key('page_by_link_name', link_name))
    if page_cached:
        return PageModel.from_cache(page_cached)

    with session() as s:
        m = s.query(PageOrmModel).filter_by(
            link_name=link_name).one_or_none()
        res = None
        if m:
            res = PageModel.from_orm_model(m)
            cache.set(cache_key('page_by_link_name', res.link_name),
                      res.to_cache())
    if res:
        local_cache.set(cache_key('link_name', link_name), res)
    return res
def find_thread_by_board_thread_refno_with_posts(board: BoardModel, thread_refno: int) -> Optional[ThreadModel]:
    """Return a thread including its board and posts, or None.

    The memcache copy is used when available. On a miss the thread is read
    from the database, cached with no timeout, and returned; threads that do
    not exist or have no posts yield None.
    """
    cached_thread = cache.get(cache_key('thread', board.name, thread_refno))
    if cached_thread:
        return ThreadModel.from_cache(cached_thread)

    with session() as s:
        query = s.query(ThreadOrmModel).options(lazyload('posts')).filter(
            ThreadOrmModel.refno == thread_refno,
            ThreadOrmModel.board_id == BoardOrmModel.id,
            BoardOrmModel.name == board.name)
        row = query.one_or_none()
        if row is None or not row.posts:
            return None

        # TODO: also load board in the query above
        model = ThreadModel.from_orm_model(
            row, include_board=True, include_posts=True)
        fresh_cache = model.to_cache(include_board=True, include_posts=True)
        cache.set(cache_key('thread', model.board.name, model.refno),
                  fresh_cache, timeout=0)
        return model
def _invalidate_board_pages_catalog_cache(s: Session, board: BoardModel):
    """
    Update the memcache version of the specified board.
    This will update the board pages from the already cached thread stubs,
    and create a new catalog cache.

    Returns a (catalog, board_pages) tuple.
    """
    q = s.query(ThreadOrmModel)
    q = q.filter(ThreadOrmModel.board_id == board.id)
    threads_orm = q.all()
    thread_models = [ThreadModel.from_orm_model(t) for t in threads_orm]

    # This builds the board index, stickies first, oldest first, then normal
    # posts, newest first. The pages are split according to the board config,
    # and the catalog is built from only the ops.
    stickies = []
    threads = []
    for thread in thread_models:
        thread_stub_cache = cache.get(
            cache_key('thread_stub', board.name, thread.refno))
        if thread_stub_cache:
            thread_stub = ThreadStubModel.from_cache(thread_stub_cache)
        else:
            # The board and thread selects are done separately and there is
            # thus the possibility that the thread was removed after the board
            # select. Fix: _invalidate_thread_cache may return None instead of
            # a tuple in that case; unpacking it directly raised TypeError.
            refreshed = _invalidate_thread_cache(s, thread, board)
            thread_stub = refreshed[1] if refreshed else None
            if thread_stub is None:
                continue

        # Fix: was the unidiomatic expression-statement form
        # `stickies.append(x) if x.sticky else threads.append(x)`.
        if thread_stub.sticky:
            stickies.append(thread_stub)
        else:
            threads.append(thread_stub)

    stickies = sorted(stickies, key=lambda t: t.last_modified, reverse=False)
    threads = sorted(threads, key=lambda t: t.last_modified, reverse=True)
    all_thread_stubs = stickies + threads

    # The catalog is a CatalogModel with ThreadStubs with only OP's
    catalog = CatalogModel.from_board_thread_stubs(board, all_thread_stubs)
    catalog_cache = catalog.to_cache()
    cache.set(cache_key('board', board.name), catalog_cache, timeout=0)

    # All threads with stubs, divided per page
    # note: there is the possibility that concurrent processes updating this
    # cache mess up the order / create duplicates of the page threads.
    # This chance is however very low, and has no ill side effects except for
    # a visual glitch.
    board_pages = []
    for i in range(board.config.pages):
        from_index = i * board.config.per_page
        to_index = (i + 1) * board.config.per_page
        board_page = BoardPageModel.from_page_thread_stubs(
            i, all_thread_stubs[from_index:to_index])
        board_pages.append(board_page)
        board_page_cache = board_page.to_cache()
        cache.set(cache_key('board', board.name, i),
                  board_page_cache, timeout=0)

    return catalog, board_pages