def test_get_set_blocking(self) -> None:
    """The blocking flag is settable at construction and mutable afterward."""
    for initial in (True, False):
        limiter = ratelimit.APIRateLimiter(blocking=initial)
        # Constructor value is reflected by the getter...
        self.assertEqual(limiter.get_blocking(), initial)
        # ...and the setter flips it.
        limiter.set_blocking(not initial)
        self.assertEqual(limiter.get_blocking(), not initial)
def test_set_remaining_decrease(self) -> None:
    """Each call consumes one unit of the budget set via set_remaining."""
    limiter = ratelimit.APIRateLimiter(blocking=False)
    budget = 100
    limiter.set_remaining(budget)
    # Drain the entire budget; none of these calls should raise.
    for _ in range(budget):
        limiter()
    # The budget is exhausted, so the next call must refuse.
    with self.assertRaises(ratelimit.WouldBlock):
        limiter()
def test_calls_age_out(self) -> None:
    """Calls older than the period drop out of the rate-limit window."""
    window = 0.25
    limiter = ratelimit.APIRateLimiter(period=window, blocking=False)
    for _ in range(150):
        limiter()
    time.sleep(window)
    # The earlier calls have aged out of the window, so this should not
    # block (or raise, since the limiter is non-blocking).
    limiter()
def test_max_calls_nonblocking(self) -> None:
    """A non-blocking limiter raises once the default call budget is spent."""
    limiter = ratelimit.APIRateLimiter(blocking=False)
    # Consume the default allowance of 150 calls without incident.
    for _ in range(150):
        limiter()
    # The 151st call exceeds the limit and must raise instead of blocking.
    with self.assertRaises(ratelimit.WouldBlock):
        limiter()
def test_max_calls_blocking(self) -> None:
    """A blocking limiter sleeps through the period when over the limit."""
    period = 0.25
    limiter = ratelimit.APIRateLimiter(period=period)
    started = time.monotonic()
    for _ in range(150):
        limiter()
    # 151st call: must block until the oldest call ages out of the window.
    limiter()
    elapsed = time.monotonic() - started
    self.assertGreaterEqual(elapsed, period)
def test_set_blocking_from_thread(self) -> None:
    """set_blocking(False) from another thread wakes a blocked caller.

    The main thread fills the call budget and then makes one more call,
    which blocks. A helper thread flips the limiter to non-blocking after
    a short delay; the blocked call should then raise WouldBlock instead
    of waiting out the whole period.
    """
    limiter = ratelimit.APIRateLimiter(blocking=True)
    for _ in range(150):
        limiter()
    # Next call should block

    # Set up a thread to change us to non-blocking after a wait time
    def thread():
        time.sleep(0.1)
        limiter.set_blocking(False)

    # BUG FIX: keep a handle on the thread and join it, so a still-running
    # helper thread cannot leak into subsequent tests (the original
    # started the thread anonymously and never joined it).
    worker = threading.Thread(target=thread)
    worker.start()
    try:
        with self.assertRaises(ratelimit.WouldBlock):
            limiter()
    finally:
        worker.join()
def test_nonblocking(self) -> None:
    """RateLimitedAPI surfaces RequestWouldBlock once the budget is gone."""
    self.mock_adapter.register_uri(
        "post", "https://api.broadcasthe.net/", json=self.dummy_response)
    rate_limiter = ratelimit.APIRateLimiter(blocking=False)
    api = api_lib.RateLimitedAPI(
        "dummy_key", rate_limiter, session=self.session)
    # The first 150 calls fit inside the budget and must not raise.
    for _ in range(150):
        api.getTorrents()
    # One more call exceeds the budget and must be refused.
    with self.assertRaises(ratelimit.RequestWouldBlock):
        api.getTorrents()
def test_call_limit_exceeded(self) -> None:
    """A server-side call-limit error also exhausts the local budget."""
    error_response = api_types.Response(
        id="dummy_id",
        error=api_types.Error(
            message="Call Limit Exceeded",
            code=api_types.ErrorCode.CALL_LIMIT_EXCEEDED,
        ),
    )
    self.mock_adapter.register_uri(
        "post", "https://api.broadcasthe.net/", json=error_response)
    rate_limiter = ratelimit.APIRateLimiter(blocking=False)
    api = api_lib.RateLimitedAPI(
        "dummy_key", rate_limiter, session=self.session)
    # The server reports the call limit was exceeded...
    with self.assertRaises(api_lib.CallLimitExceededError):
        api.getTorrents()
    # ...after which the local limiter refuses further calls.
    with self.assertRaises(ratelimit.RequestWouldBlock):
        api.getTorrents()
def main() -> None:
    """Entry point: parse flags, wire up the scraper daemons, and run them.

    Starts one daemon thread per enabled scraper and waits until any of
    them finishes (or SIGINT/SIGTERM arrives), then terminates the rest
    and re-raises the first exception, if any.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", "-v", action="count")
    parser.add_argument("--path", type=pathlib.Path, required=True)
    parser.add_argument(
        "--disable",
        action="append",
        choices=("metadata", "metadata_tip", "snatchlist"),
    )
    parser.add_argument("--api_max_calls", type=int, default=150)
    parser.add_argument("--api_period", type=int, default=3600)
    parser.add_argument("--web_request_rate", type=float, default=0.2)
    parser.add_argument("--web_request_burst", type=float, default=10)
    parser.add_argument("--snatchlist_period", type=float, default=3600)
    parser.add_argument("--parent", type=int)
    args = parser.parse_args()

    if args.verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(
        stream=sys.stdout,
        level=level,
        format="%(asctime)s %(levelname)s %(threadName)s "
        "%(filename)s:%(lineno)d %(message)s",
    )

    storage = storage_lib.Storage(args.path)
    session = requests.Session()
    rate_limiter = ratelimit.RateLimiter(
        rate=args.web_request_rate, burst=args.web_request_burst)
    api_rate_limiter = ratelimit.APIRateLimiter(
        max_calls=args.api_max_calls, period=args.api_period)
    auth = storage.get_user_auth()
    user_access = site.UserAccess(
        auth=auth, session=session, rate_limiter=rate_limiter)
    if auth.api_key is None:
        raise ValueError("api_key is required")
    api = api_lib.RateLimitedAPI(
        auth.api_key, rate_limiter=api_rate_limiter, session=session)

    def metadata_factory() -> sqlite3.Connection:
        # Autocommit connection (isolation_level=None) with pragmas tuned
        # for the write-heavy metadata updater.
        conn = sqlite3.Connection(
            storage.metadata_db_path, isolation_level=None)
        cur = conn.cursor()
        cur.execute("pragma busy_timeout = 5000")
        # Metadata updates use temp tables with small data sizes
        cur.execute("pragma temp_store = MEMORY")
        cur.execute("pragma trusted_schema = OFF")
        cur.execute("pragma journal_mode = WAL")
        cur.execute(f"pragma mmap_size = {2**32}")
        cur.execute("pragma synchronous = NORMAL")
        return conn

    metadata_pool = dbver.null_pool(metadata_factory)

    def user_factory() -> sqlite3.Connection:
        # Autocommit connection for the (smaller) user database.
        conn = sqlite3.Connection(storage.user_db_path, isolation_level=None)
        cur = conn.cursor()
        cur.execute("pragma busy_timeout = 5000")
        cur.execute("pragma trusted_schema = OFF")
        cur.execute("pragma journal_mode = WAL")
        cur.execute(f"pragma mmap_size = {2**28}")
        cur.execute("pragma synchronous = NORMAL")
        return conn

    user_pool = dbver.null_pool(user_factory)

    # BUG FIX: annotation was Set[list]; --disable yields choice strings.
    disable: Set[str] = set(args.disable) if args.disable else set()

    daemons: Dict[str, daemon_lib.Daemon] = {}
    if "metadata" not in disable:
        daemons["metadata_scraper"] = scrape.MetadataScraper(
            api=api, metadata_pool=metadata_pool)
    if "metadata_tip" not in disable:
        daemons["metadata_tip_scraper"] = scrape.MetadataTipScraper(
            api=api, user_access=user_access, metadata_pool=metadata_pool)
    if "snatchlist" not in disable:
        daemons["snatchlist_scraper"] = scrape.SnatchlistScraper(
            api=api, user_pool=user_pool, period=args.snatchlist_period)
    # BUG FIX: explicit None check, so a (degenerate) --parent=0 is not
    # silently ignored by truthiness.
    if args.parent is not None:
        daemons["parent_checker"] = ParentChecker(args.parent)

    def signal_handler(signum: int, _: Any) -> None:
        _LOG.info("terminating due to signal %d", signum)
        for daemon in daemons.values():
            daemon.terminate()

    # One single-purpose executor per daemon, so each worker thread gets a
    # recognizable thread_name_prefix. BUG FIX: the original also created
    # an unused 8-worker executor that was immediately shadowed inside the
    # loop — removed; and the per-daemon executors were never shut down.
    executors: List[concurrent.futures.ThreadPoolExecutor] = []
    try:
        # Set signal handlers within the try-finally, so we'll be sure to unset
        # them if we get a signal while setting them
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)
        futures: List[concurrent.futures.Future] = []
        for name, daemon in daemons.items():
            executor = concurrent.futures.ThreadPoolExecutor(
                thread_name_prefix=name)
            executors.append(executor)
            futures.append(executor.submit(daemon.run))
        # Wait for any daemon to die or be terminated
        concurrent.futures.wait(
            futures, return_when=concurrent.futures.FIRST_COMPLETED)
        # Ensure all daemons are terminated; all are killed if one dies
        for daemon in daemons.values():
            daemon.terminate()
        # Re-raise any exceptions
        for future in futures:
            future.result()
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # Release worker threads; don't block on daemons that are already
        # being asked to terminate.
        for executor in executors:
            executor.shutdown(wait=False)
def test_set_remaining_zero(self) -> None:
    """A budget of zero means even the first call is refused."""
    limiter = ratelimit.APIRateLimiter(blocking=False)
    limiter.set_remaining(0)
    # Callable form of assertRaises: invoking the limiter must raise.
    self.assertRaises(ratelimit.WouldBlock, limiter)