def test_in_memory_reset(self):
    """storage.reset() must wipe counters so the full quota is available again."""
    storage = MemoryStorage()
    limiter = FixedWindowRateLimiter(storage)
    per_min = RateLimitItemPerMinute(10)
    # use up the whole window, then verify the limit is enforced
    for _ in range(10):
        self.assertTrue(limiter.hit(per_min))
    self.assertFalse(limiter.hit(per_min))
    # a reset restores the full quota immediately
    storage.reset()
    for _ in range(10):
        self.assertTrue(limiter.hit(per_min))
    self.assertFalse(limiter.hit(per_min))
def test_in_memory_reset(self):
    """A storage reset should immediately grant a fresh window of hits."""
    backing = MemoryStorage()
    window = FixedWindowRateLimiter(backing)
    item = RateLimitItemPerMinute(10)
    for _ in range(0, 10):
        self.assertTrue(window.hit(item))
    self.assertFalse(window.hit(item))  # quota exhausted
    backing.reset()
    for _ in range(0, 10):
        self.assertTrue(window.hit(item))
    self.assertFalse(window.hit(item))  # exhausted again after the refill
async def before_server_start(app_: Sanic, loop):
    # Sanic "before_server_start" listener for the MocaFileLog server: builds
    # all per-process state (config files, log sinks, rate limiter, optional
    # Google-Sheets workbook) and starts the background config-reload thread.
    mzk.set_process_name(f'MocaFileLog({core.VERSION}) --- {app_._log_name}')
    mzk.print_info(f'Starting Sanic server. -- {mzk.get_my_pid()}')
    # manual_reload=True: files are refreshed by the timer thread below,
    # not on every access.
    app_.system_config: mzk.MocaConfig = mzk.MocaConfig(core.SYSTEM_CONFIG, manual_reload=True)
    app_.ip_blacklist: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.IP_BLACKLIST_FILE,
        manual_reload=True,
        remove_duplicates=True,
    )
    app_.api_key_config: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.API_KEY_FILE, manual_reload=True)
    app_.dict_cache = {}
    # Absolute log paths are used as-is; relative ones live under the
    # client log directory.
    if app_._log_file_path.startswith('/'):
        file = app_._log_file_path
    else:
        file = core.CLIENT_LOG_DIR.joinpath(app_._log_file_path)
    app_.moca_log = mzk.MocaFileLog(file, app_._log_level)
    app_.secure_log = mzk.MocaFileLog(core.LOG_DIR.joinpath('secure.log'))
    app_.scheduler = mzk.MocaScheduler()
    app_.log_list = []
    # Rate limiter backend: Redis when configured, else per-process memory.
    if core.SERVER_CONFIG['rate_limiter_redis_storage'] is None:
        app_._storage_for_rate_limiter = MemoryStorage()
    else:
        app_._storage_for_rate_limiter = RedisStorage(
            core.SERVER_CONFIG['rate_limiter_redis_storage'])
    app_.rate_limiter = FixedWindowElasticExpiryRateLimiter(
        app_._storage_for_rate_limiter)
    # Optional Google Sheets export: only when auth credentials are configured
    # for this log name.
    if core.LOG_CONFIG[app_._log_name].get('google_spread_sheets_auth', None) is not None:
        scope = [
            'https://spreadsheets.google.com/feeds',
            'https://www.googleapis.com/auth/drive'
        ]
        app_._credentials = ServiceAccountCredentials.from_json_keyfile_name(
            str(
                core.CONFIG_DIR.joinpath(core.LOG_CONFIG[app_._log_name].get(
                    'google_spread_sheets_auth'))),
            scope,
        )
        app_._gc = authorize(app_._credentials)
        app_.workbook = app_._gc.open_by_key(
            core.LOG_CONFIG[app_._log_name].get('spread_sheets_key'))
    else:
        app_.workbook = None

    def __reload_timer(application: Sanic) -> None:
        # Daemon-thread loop: re-read the watched config files once a second.
        while True:
            mzk.sleep(1)
            application.system_config.reload_file()
            application.ip_blacklist.reload_file()
            application.api_key_config.reload_file()

    # daemon=True so this thread never blocks interpreter shutdown
    app_._timer_thread = Thread(target=__reload_timer, args=(app_, ), daemon=True)
    app_._timer_thread.start()
def init_app(self, app):
    """
    Bind this limiter to a Flask application, resolving every option from
    the values set on the extension first and the app config second.

    :param app: :class:`flask.Flask` instance to rate limit.
    """
    # config.setdefault both reads the option and writes the default back
    # into app.config so later inspection sees the effective value
    self.enabled = app.config.setdefault(C.ENABLED, True)
    self._swallow_errors = app.config.setdefault(
        C.SWALLOW_ERRORS, self._swallow_errors
    )
    self._headers_enabled = (
        self._headers_enabled
        or app.config.setdefault(C.HEADERS_ENABLED, False)
    )
    self._storage_options.update(
        app.config.get(C.STORAGE_OPTIONS, {})
    )
    # extension-level storage URI wins over the app config
    self._storage = storage_from_string(
        self._storage_uri
        or app.config.setdefault(C.STORAGE_URL, 'memory://'),
        ** self._storage_options
    )
    strategy = (
        self._strategy or app.config.setdefault(C.STRATEGY, 'fixed-window')
    )
    if strategy not in STRATEGIES:
        raise ConfigurationError("Invalid rate limiting strategy %s" % strategy)
    self._limiter = STRATEGIES[strategy](self._storage)
    # header names already set on the mapping take precedence over config
    self._header_mapping.update({
        HEADERS.RESET: self._header_mapping.get(HEADERS.RESET, None)
        or app.config.setdefault(C.HEADER_RESET, "X-RateLimit-Reset"),
        HEADERS.REMAINING: self._header_mapping.get(HEADERS.REMAINING, None)
        or app.config.setdefault(C.HEADER_REMAINING, "X-RateLimit-Remaining"),
        HEADERS.LIMIT: self._header_mapping.get(HEADERS.LIMIT, None)
        or app.config.setdefault(C.HEADER_LIMIT, "X-RateLimit-Limit"),
    })
    # config-declared global limits only apply when none were set in code
    conf_limits = app.config.get(C.GLOBAL_LIMITS, None)
    if not self._global_limits and conf_limits:
        self._global_limits = [
            ExtLimit(
                limit, self._key_func, None, False, None, None, None
            ) for limit in parse_many(conf_limits)
        ]
    fallback_limits = app.config.get(C.IN_MEMORY_FALLBACK, None)
    if not self._in_memory_fallback and fallback_limits:
        self._in_memory_fallback = [
            ExtLimit(
                limit, self._key_func, None, False, None, None, None
            ) for limit in parse_many(fallback_limits)
        ]
    if self._auto_check:
        app.before_request(self.__check_request_limit)
        app.after_request(self.__inject_headers)
    # the fallback limiter uses process-local memory so rate limiting keeps
    # working when the primary storage backend is unreachable
    if self._in_memory_fallback:
        self._fallback_storage = MemoryStorage()
        self._fallback_limiter = STRATEGIES[strategy](self._fallback_storage)

    # purely for backward compatibility as stated in flask documentation
    if not hasattr(app, 'extensions'):
        app.extensions = {}  # pragma: no cover
    app.extensions['limiter'] = self
def _setup_logger(self):
    """Create this component's logger and, when configured, a limiter that
    throttles our own error messages (avoids log spam during temporary
    network errors and similar repeated failures)."""
    self._logger = get_logger(self.name)
    limit_spec = constants.ERROR_LOG_RATE_LIMIT
    if not limit_spec:
        return
    # fixed-window limiter backed by process-local memory
    self._rate_limit_storage = MemoryStorage()
    self._rate_limit_strategy = FixedWindowRateLimiter(self._rate_limit_storage)
    self._rate_limit_item = parse_rate_limit(limit_spec)
def test_in_memory_moving_window_clear(self):
    """clear() on a moving-window limit must free the consumed slot."""
    storage = MemoryStorage()
    limiter = MovingWindowRateLimiter(storage)
    per_min = RateLimitItemPerMinute(1)
    limiter.hit(per_min)                    # consume the single slot
    self.assertFalse(limiter.hit(per_min))  # window is now full
    limiter.clear(per_min)                  # drop the recorded events
    self.assertTrue(limiter.hit(per_min))   # quota available again
def test_in_memory(self):
    """A fixed window refills once the minute boundary has passed."""
    with hiro.Timeline().freeze() as timeline:
        storage = MemoryStorage()
        limiter = FixedWindowRateLimiter(storage)
        per_min = RateLimitItemPerMinute(10)
        for _ in range(10):
            self.assertTrue(limiter.hit(per_min))
        # the eleventh hit inside the same window is rejected
        self.assertFalse(limiter.hit(per_min))
        # jump past the window boundary: quota is available again
        timeline.forward(61)
        self.assertTrue(limiter.hit(per_min))
def test_test_moving_window(self):
    """test() mirrors hit() availability for a moving window."""
    with hiro.Timeline().freeze():
        storage = MemoryStorage()
        bucket = RateLimitItemPerSecond(2, 1)
        limiter = MovingWindowRateLimiter(storage)
        # first hit: one slot remains, so both probe and hit succeed
        self.assertTrue(limiter.hit(bucket), storage)
        self.assertTrue(limiter.test(bucket), storage)
        self.assertTrue(limiter.hit(bucket), storage)
        # window full: the dry-run check and a real attempt both refuse
        self.assertFalse(limiter.test(bucket), storage)
        self.assertFalse(limiter.hit(bucket), storage)
def test_test_fixed_window(self):
    """test() mirrors hit() availability for a fixed window."""
    with hiro.Timeline().freeze():
        storage = MemoryStorage()
        limiter = FixedWindowRateLimiter(storage)
        bucket = RateLimitItemPerSecond(2, 1)
        # first hit: one slot remains, so both probe and hit succeed
        self.assertTrue(limiter.hit(bucket), storage)
        self.assertTrue(limiter.test(bucket), storage)
        self.assertTrue(limiter.hit(bucket), storage)
        # window full: the dry-run check and a real attempt both refuse
        self.assertFalse(limiter.test(bucket), storage)
        self.assertFalse(limiter.hit(bucket), storage)
def test_in_memory_expiry(self):
    """Expired fixed-window counters are purged from the backing dict."""
    with hiro.Timeline().freeze() as timeline:
        storage = MemoryStorage()
        limiter = FixedWindowRateLimiter(storage)
        per_min = RateLimitItemPerMinute(10)
        for _ in range(10):
            self.assertTrue(limiter.hit(per_min))
        timeline.forward(60)
        # touch another key and yield so expiry can run
        limiter.hit(RateLimitItemPerSecond(1))
        time.sleep(0.1)
        self.assertNotIn(per_min.key_for(), storage.storage)
def test_in_memory_expiry_moving_window(self):
    """Events older than the moving window are dropped from storage."""
    with hiro.Timeline().freeze() as timeline:
        storage = MemoryStorage()
        limiter = MovingWindowRateLimiter(storage)
        per_min = RateLimitItemPerMinute(10)
        per_sec = RateLimitItemPerSecond(1)
        # two full minutes of traffic; a hit on another key each minute
        # gives the cleanup a chance to run
        for _ in range(2):
            for _ in range(10):
                self.assertTrue(limiter.hit(per_min))
            timeline.forward(60)
            self.assertTrue(limiter.hit(per_sec))
        time.sleep(1)
        # all per-minute events are now outside the window
        self.assertEqual([], storage.events[per_min.key_for()])
def test_fixed_window(self):
    """Window stats report remaining quota and the reset timestamp."""
    storage = MemoryStorage()
    limiter = FixedWindowRateLimiter(storage)
    with hiro.Timeline().freeze() as timeline:
        start = int(time.time())
        bucket = RateLimitItemPerSecond(10, 2)
        # exhaust all ten hits within the two-second window
        outcomes = [limiter.hit(bucket) for _ in range(10)]
        self.assertTrue(all(outcomes))
        timeline.forward(1)
        self.assertFalse(limiter.hit(bucket))
        # nothing left; the window resets two seconds after it opened
        self.assertEqual(limiter.get_window_stats(bucket)[1], 0)
        self.assertEqual(limiter.get_window_stats(bucket)[0], start + 2)
        timeline.forward(1)
        # new window: the full quota is back
        self.assertEqual(limiter.get_window_stats(bucket)[1], 10)
        self.assertTrue(limiter.hit(bucket))
def __configure_fallbacks(self, app, strategy):
    """Set up the optional in-memory fallback limiter from the app config."""
    config = app.config
    fallback_enabled = config.get(C.IN_MEMORY_FALLBACK_ENABLED, False)
    fallback_limits = config.get(C.IN_MEMORY_FALLBACK, None)
    # config-provided limits only apply when none were set in code
    if fallback_limits and not self._in_memory_fallback:
        self._in_memory_fallback = [
            LimitGroup(fallback_limits, self._key_func, None, False,
                       None, None, None, None, None)
        ]
    # the fallback is implicitly enabled as soon as any limits exist
    if not self._in_memory_fallback_enabled:
        self._in_memory_fallback_enabled = (
            fallback_enabled or len(self._in_memory_fallback) > 0
        )
    if self._in_memory_fallback_enabled:
        self._fallback_storage = MemoryStorage()
        self._fallback_limiter = STRATEGIES[strategy](self._fallback_storage)
def test_moving_window_in_memory(self):
    """Sliding-window stats drop as hits land and recover as events expire."""
    storage = MemoryStorage()
    limiter = MovingWindowRateLimiter(storage)
    with hiro.Timeline().freeze() as timeline:
        bucket = RateLimitItemPerMinute(10)
        # two hits every ten seconds until the quota is gone
        for step in range(5):
            self.assertTrue(limiter.hit(bucket))
            self.assertTrue(limiter.hit(bucket))
            self.assertEqual(
                limiter.get_window_stats(bucket)[1], 10 - (step + 1) * 2
            )
            timeline.forward(10)
        self.assertEqual(limiter.get_window_stats(bucket)[1], 0)
        self.assertFalse(limiter.hit(bucket))
        timeline.forward(20)
        # the earliest pair of hits has slid out of the window by now
        self.assertEqual(limiter.get_window_stats(bucket)[1], 2)
        self.assertEqual(
            limiter.get_window_stats(bucket)[0], int(time.time() + 30)
        )
        timeline.forward(31)
        self.assertEqual(limiter.get_window_stats(bucket)[1], 10)
def test_fixed_window_with_elastic_expiry_in_memory(self):
    """Each rejected hit extends the elastic window's expiry."""
    storage = MemoryStorage()
    limiter = FixedWindowElasticExpiryRateLimiter(storage)
    with hiro.Timeline().freeze() as timeline:
        start = int(time.time())
        bucket = RateLimitItemPerSecond(10, 2)
        outcomes = [limiter.hit(bucket) for _ in range(10)]
        self.assertTrue(all(outcomes))
        timeline.forward(1)
        self.assertFalse(limiter.hit(bucket))
        self.assertEqual(limiter.get_window_stats(bucket)[1], 0)
        # three extensions to the expiry
        self.assertEqual(limiter.get_window_stats(bucket)[0], start + 3)
        timeline.forward(1)
        self.assertFalse(limiter.hit(bucket))
        timeline.forward(3)
        # far enough ahead: a brand-new window opens
        start = int(time.time())
        self.assertTrue(limiter.hit(bucket))
        self.assertEqual(limiter.get_window_stats(bucket)[1], 9)
        self.assertEqual(limiter.get_window_stats(bucket)[0], start + 2)
def test_memory_storage_moving_window(self):
    """Concurrent hits on one key must respect the per-second cap exactly."""
    storage = MemoryStorage()
    limiter = MovingWindowRateLimiter(storage)
    per_second = RateLimitItemPerSecond(100)
    # warm the storage with hits against unrelated random keys
    for _ in range(100):
        limiter.hit(per_second, uuid4().hex)
    key = uuid4().hex
    successes = []

    def attempt():
        # record only successful hits; list.append is thread-safe in CPython
        if limiter.hit(per_second, key):
            successes.append(None)

    started_at = time.time()
    workers = [threading.Thread(target=attempt) for _ in range(1000)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # 1000 threads must finish quickly and exactly 100 may get through
    self.assertTrue(time.time() - started_at < 1)
    self.assertEqual(len(successes), 100)
async def before_server_start(app_: Sanic, loop):
    # Sanic "before_server_start" listener: loads the config files this
    # process watches, wires up logging and rate limiting, and starts the
    # background config-reload thread.
    mzk.set_process_name(f'{app_.name} --- listener {mzk.get_my_pid()}')
    mzk.print_info(f'Starting Sanic server. -- {mzk.get_my_pid()}')
    # manual_reload=True: files are refreshed by the timer thread below
    app_.system_config: mzk.MocaConfig = mzk.MocaConfig(core.SYSTEM_CONFIG, manual_reload=True)
    app_.commands: mzk.MocaSynchronizedJSONDictFile = mzk.MocaSynchronizedJSONDictFile(
        core.COMMANDS_CONFIG, manual_reload=True)
    app_.ip_blacklist: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.IP_BLACKLIST_FILE,
        manual_reload=True,
        remove_duplicates=True,
    )
    app_.api_key_config: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.API_KEY_FILE, manual_reload=True)
    app_.dict_cache = {}
    app_.secure_log = mzk.MocaFileLog(core.LOG_DIR.joinpath('secure.log'))
    app_.scheduler = mzk.MocaScheduler()
    # Rate limiter backend: Redis when configured, else per-process memory.
    if core.SERVER_CONFIG['rate_limiter_redis_storage'] is None:
        app_._storage_for_rate_limiter = MemoryStorage()
    else:
        app_._storage_for_rate_limiter = RedisStorage(
            core.SERVER_CONFIG['rate_limiter_redis_storage'])
    app_.rate_limiter = FixedWindowElasticExpiryRateLimiter(
        app_._storage_for_rate_limiter)

    def __reload_timer(application: Sanic) -> None:
        # Daemon-thread loop: re-read the watched config files once a second.
        while True:
            mzk.sleep(1)
            application.system_config.reload_file()
            application.commands.reload_file()
            application.ip_blacklist.reload_file()
            application.api_key_config.reload_file()

    # daemon=True so this thread never blocks interpreter shutdown
    app_._timer_thread = Thread(target=__reload_timer, args=(app_, ), daemon=True)
    app_._timer_thread.start()
class MemoryStorageTests(unittest.TestCase):
    """Behavioral tests for the in-memory rate-limit storage backend."""

    def setUp(self):
        # fresh backend per test so no state leaks between cases
        self.storage = MemoryStorage()

    def test_in_memory(self):
        """A fixed window refills once the minute boundary has passed."""
        with hiro.Timeline().freeze() as timeline:
            limiter = FixedWindowRateLimiter(self.storage)
            per_min = RateLimitItemPerMinute(10)
            for i in range(0, 10):
                self.assertTrue(limiter.hit(per_min))
            # eleventh hit inside the same window is rejected
            self.assertFalse(limiter.hit(per_min))
            timeline.forward(61)
            self.assertTrue(limiter.hit(per_min))

    def test_fixed_window_clear(self):
        """clear() frees a fixed-window counter immediately."""
        limiter = FixedWindowRateLimiter(self.storage)
        per_min = RateLimitItemPerMinute(1)
        limiter.hit(per_min)
        self.assertFalse(limiter.hit(per_min))
        limiter.clear(per_min)
        self.assertTrue(limiter.hit(per_min))

    def test_moving_window_clear(self):
        """clear() frees a moving-window slot immediately."""
        limiter = MovingWindowRateLimiter(self.storage)
        per_min = RateLimitItemPerMinute(1)
        limiter.hit(per_min)
        self.assertFalse(limiter.hit(per_min))
        limiter.clear(per_min)
        self.assertTrue(limiter.hit(per_min))

    def test_reset(self):
        """reset() restores the full quota for the current window."""
        limiter = FixedWindowRateLimiter(self.storage)
        per_min = RateLimitItemPerMinute(10)
        for i in range(0, 10):
            self.assertTrue(limiter.hit(per_min))
        self.assertFalse(limiter.hit(per_min))
        self.storage.reset()
        for i in range(0, 10):
            self.assertTrue(limiter.hit(per_min))
        self.assertFalse(limiter.hit(per_min))

    def test_expiry(self):
        """Expired fixed-window keys are purged from the backing dict."""
        with hiro.Timeline().freeze() as timeline:
            limiter = FixedWindowRateLimiter(self.storage)
            per_min = RateLimitItemPerMinute(10)
            for i in range(0, 10):
                self.assertTrue(limiter.hit(per_min))
            timeline.forward(60)
            # touch another key and yield
            limiter.hit(RateLimitItemPerSecond(1))
            time.sleep(0.1)
            self.assertTrue(per_min.key_for() not in self.storage.storage)

    def test_expiry_moving_window(self):
        """Moving-window events older than the window are dropped."""
        with hiro.Timeline().freeze() as timeline:
            limiter = MovingWindowRateLimiter(self.storage)
            per_min = RateLimitItemPerMinute(10)
            per_sec = RateLimitItemPerSecond(1)
            for _ in range(0, 2):
                for _ in range(0, 10):
                    self.assertTrue(limiter.hit(per_min))
                timeline.forward(60)
                # hit on another key each minute lets cleanup run
                self.assertTrue(limiter.hit(per_sec))
                timeline.forward(1)
            time.sleep(0.1)
            self.assertEqual([], self.storage.events[per_min.key_for()])
async def before_server_start(app_: Sanic, loop):
    # Sanic "before_server_start" listener: loads config files, wires up the
    # Twitter client, rate limiter, MySQL/Redis/simple-cache backends, and
    # starts the background config-reload thread. Any missing configuration
    # or unreachable database exits the process with status 1.
    mzk.set_process_name(f'{app_.name} --- listener {mzk.get_my_pid()}')
    mzk.print_info(f'Starting Sanic server. -- {mzk.get_my_pid()}')
    # manual_reload=True: files are refreshed by the timer thread below
    app_.system_config: mzk.MocaConfig = mzk.MocaConfig(core.SYSTEM_CONFIG, manual_reload=True)
    app_.ip_blacklist: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.IP_BLACKLIST_FILE,
        manual_reload=True,
        remove_duplicates=True,
    )
    app_.api_key_config: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.API_KEY_FILE, manual_reload=True)
    app_.twitter: mzk.MocaTwitter = mzk.MocaTwitter(
        core.TWITTER_CONFIG['CONSUMER_KEY'],
        core.TWITTER_CONFIG['CONSUMER_SECRET'],
        core.TWITTER_CONFIG['ACCESS_TOKEN'],
        core.TWITTER_CONFIG['ACCESS_TOKEN_SECRET'])
    app_.dict_cache = {}
    app_.secure_log = mzk.MocaFileLog(core.LOG_DIR.joinpath('secure.log'))
    app_.scheduler = mzk.MocaScheduler()
    # Rate limiter backend: Redis when configured, else per-process memory.
    if core.SERVER_CONFIG['rate_limiter_redis_storage'] is None:
        app_._storage_for_rate_limiter = MemoryStorage()
    else:
        app_._storage_for_rate_limiter = RedisStorage(
            core.SERVER_CONFIG['rate_limiter_redis_storage'])
    app_.rate_limiter = FixedWindowElasticExpiryRateLimiter(
        app_._storage_for_rate_limiter)
    try:
        app_.mysql = mzk.MocaMysql(
            core.DB_CONFIG['mysql']['host'],
            int(core.DB_CONFIG['mysql']['port']),
            core.DB_CONFIG['mysql']['user'],
            core.DB_CONFIG['mysql']['password'],
            core.DB_CONFIG['mysql']['database'],
            int(core.DB_CONFIG['mysql']['min_size']),
            int(core.DB_CONFIG['mysql']['max_size']),
        )
        app_.mysql.force_sync = mzk.try_to_bool(
            core.DB_CONFIG['mysql']['force_sync'])
    except KeyError as e:
        mzk.print_error(
            f'Mysql database configuration error. missing key: {e}')
        mzk.sys_exit(1)
    except MySQLError as e:
        mzk.print_error(
            "Can't connect to MySQL database, Please check your database configuration."
        )
        mzk.print_error("And make sure your database is online.")
        mzk.print_error(
            "You can use 'python3 moca.py test-mysql-con' to check your database."
        )
        mzk.print_error(f"<MySQLError: {e}>")
        mzk.sys_exit(1)
    try:
        # NOTE(review): pool sizes below are read from the 'mysql' section of
        # DB_CONFIG rather than 'redis' — looks like a copy-paste slip; confirm
        # the intended config keys before changing.
        app_.redis = mzk.MocaRedis(
            core.DB_CONFIG['redis']['host'],
            int(core.DB_CONFIG['redis']['port']),
            int(core.DB_CONFIG['redis']['db']),
            core.DB_CONFIG['redis']['password'],
            int(core.DB_CONFIG['mysql']['min_size']),
            int(core.DB_CONFIG['mysql']['max_size']),
        )
        app_.redis.prefix = core.DB_CONFIG['redis']['prefix']
        await app_.redis.test_con()
    except KeyError as e:
        mzk.print_error(
            f'Redis database configuration error. missing key: {e}')
        mzk.sys_exit(1)
    except (RedisError, ConnectionRefusedError) as e:
        mzk.print_error(
            "Can't connect to Redis database, Please check your database configuration."
        )
        mzk.print_error("And make sure your database is online.")
        mzk.print_error(
            "You can use 'python3 moca.py test-redis-con' to check your database."
        )
        mzk.print_error(f"<(RedisError, ConnectionRefusedError): {e}>")
        mzk.sys_exit(1)
    try:
        app_.simple_cache = mzk.MocaSimpleCache(
            int(core.DB_CONFIG['simple_cache']['pool_size']),
            int(core.DB_CONFIG['simple_cache']['page_size']),
        )
    except KeyError as e:
        mzk.print_error(f'SimpleCache configuration error. missing key: {e}')
        mzk.sys_exit(1)

    def __reload_timer(application: Sanic) -> None:
        # Daemon-thread loop: re-read the watched config files once a second.
        while True:
            mzk.sleep(1)
            application.system_config.reload_file()
            application.ip_blacklist.reload_file()
            application.api_key_config.reload_file()

    # daemon=True so this thread never blocks interpreter shutdown
    app_._timer_thread = Thread(target=__reload_timer, args=(app_, ), daemon=True)
    app_._timer_thread.start()
def init_app(self, app):
    """
    Bind this limiter to a Flask application, resolving every option from
    the values set on the extension first and the app config second.

    :param app: :class:`flask.Flask` instance to rate limit.
    """
    # config.setdefault both reads the option and writes the default back
    # into app.config so later inspection sees the effective value
    self.enabled = app.config.setdefault(C.ENABLED, self.enabled)
    self._swallow_errors = app.config.setdefault(
        C.SWALLOW_ERRORS, self._swallow_errors
    )
    self._headers_enabled = (
        self._headers_enabled
        or app.config.setdefault(C.HEADERS_ENABLED, False)
    )
    self._storage_options.update(app.config.get(C.STORAGE_OPTIONS, {}))
    # extension-level storage URI wins over the app config
    self._storage = storage_from_string(
        self._storage_uri
        or app.config.setdefault(C.STORAGE_URL, 'memory://'),
        **self._storage_options
    )
    strategy = (
        self._strategy or app.config.setdefault(C.STRATEGY, 'fixed-window')
    )
    if strategy not in STRATEGIES:
        raise ConfigurationError(
            "Invalid rate limiting strategy %s" % strategy
        )
    self._limiter = STRATEGIES[strategy](self._storage)
    # header names already set on the mapping take precedence over config
    self._header_mapping.update(
        {
            HEADERS.RESET:
                self._header_mapping.get(HEADERS.RESET, None)
                or app.config.setdefault(C.HEADER_RESET, "X-RateLimit-Reset"),
            HEADERS.REMAINING:
                self._header_mapping.get(HEADERS.REMAINING, None)
                or app.config.setdefault(
                    C.HEADER_REMAINING, "X-RateLimit-Remaining"
                ),
            HEADERS.LIMIT:
                self._header_mapping.get(HEADERS.LIMIT, None)
                or app.config.setdefault(C.HEADER_LIMIT, "X-RateLimit-Limit"),
            HEADERS.RETRY_AFTER:
                self._header_mapping.get(HEADERS.RETRY_AFTER, None)
                or app.config.setdefault(C.HEADER_RETRY_AFTER, "Retry-After"),
        }
    )
    self._retry_after = (
        self._retry_after or app.config.get(C.HEADER_RETRY_AFTER_VALUE)
    )
    self._key_prefix = (self._key_prefix or app.config.get(C.KEY_PREFIX))
    # config-declared limits only apply when none were set in code
    app_limits = app.config.get(C.APPLICATION_LIMITS, None)
    if not self._application_limits and app_limits:
        self._application_limits = [
            LimitGroup(
                app_limits, self._key_func, "global", False, None, None, None
            )
        ]
    # GLOBAL_LIMITS is deprecated in favor of DEFAULT_LIMITS; warn but
    # still honor it (it takes precedence in the fallback chain below)
    if app.config.get(C.GLOBAL_LIMITS, None):
        self.raise_global_limits_warning()
    conf_limits = app.config.get(
        C.GLOBAL_LIMITS, app.config.get(C.DEFAULT_LIMITS, None)
    )
    if not self._default_limits and conf_limits:
        self._default_limits = [
            LimitGroup(
                conf_limits, self._key_func, None, False, None, None, None
            )
        ]
    fallback_limits = app.config.get(C.IN_MEMORY_FALLBACK, None)
    if not self._in_memory_fallback and fallback_limits:
        self._in_memory_fallback = [
            LimitGroup(
                fallback_limits, self._key_func, None, False, None, None, None
            )
        ]
    # the fallback limiter uses process-local memory so rate limiting keeps
    # working when the primary storage backend is unreachable
    if self._in_memory_fallback:
        self._fallback_storage = MemoryStorage()
        self._fallback_limiter = STRATEGIES[strategy](
            self._fallback_storage
        )
    # purely for backward compatibility as stated in flask documentation
    if not hasattr(app, 'extensions'):
        app.extensions = {}  # pragma: no cover
    # register hooks only once per app, even if init_app is called again
    if not app.extensions.get('limiter'):
        if self._auto_check:
            app.before_request(self.__check_request_limit)
            app.after_request(self.__inject_headers)
        app.extensions['limiter'] = self
async def before_server_start(app_: Sanic, loop):
    # Sanic "before_server_start" listener: loads config files, wires up the
    # rate limiter and MySQL backend, starts the config-reload thread, and
    # loads the MocaBot instances plus their id<->name lookup caches.
    mzk.set_process_name(f'{app_.name} --- listener {mzk.get_my_pid()}')
    mzk.print_info(f'Starting Sanic server. -- {mzk.get_my_pid()}')
    # manual_reload=True: files are refreshed by the timer thread below
    app_.system_config: mzk.MocaConfig = mzk.MocaConfig(core.SYSTEM_CONFIG, manual_reload=True)
    app_.ip_blacklist: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.IP_BLACKLIST_FILE,
        manual_reload=True,
        remove_duplicates=True,
    )
    app_.api_key_config: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.API_KEY_FILE, manual_reload=True)
    app_.flags = mzk.MocaSynchronizedJSONDictFile(core.FLAGS_FILE, manual_reload=True)
    app_.dict_cache = {}
    app_.secure_log = mzk.MocaFileLog(core.LOG_DIR.joinpath('secure.log'))
    app_.scheduler = mzk.MocaScheduler()
    # Rate limiter backend: Redis when configured, else per-process memory.
    if core.SERVER_CONFIG['rate_limiter_redis_storage'] is None:
        app_._storage_for_rate_limiter = MemoryStorage()
    else:
        app_._storage_for_rate_limiter = RedisStorage(
            core.SERVER_CONFIG['rate_limiter_redis_storage'])
    app_.rate_limiter = FixedWindowElasticExpiryRateLimiter(
        app_._storage_for_rate_limiter)
    # MySQL is mandatory: any configuration or connection error exits with 1.
    try:
        app_.mysql = mzk.MocaMysql(
            core.DB_CONFIG['mysql']['host'],
            int(core.DB_CONFIG['mysql']['port']),
            core.DB_CONFIG['mysql']['user'],
            core.DB_CONFIG['mysql']['password'],
            core.DB_CONFIG['mysql']['database'],
            int(core.DB_CONFIG['mysql']['min_size']),
            int(core.DB_CONFIG['mysql']['max_size']),
        )
        app_.mysql.force_sync = mzk.try_to_bool(
            core.DB_CONFIG['mysql']['force_sync'])
    except KeyError as e:
        mzk.print_error(
            f'Mysql database configuration error. missing key: {e}')
        mzk.sys_exit(1)
    except MySQLError as e:
        mzk.print_error(
            "Can't connect to MySQL database, Please check your database configuration."
        )
        mzk.print_error("And make sure your database is online.")
        mzk.print_error(
            "You can use 'python3 moca.py test-mysql-con' to check your database."
        )
        mzk.print_error(f"<MySQLError: {e}>")
        mzk.sys_exit(1)

    def __reload_timer(application: Sanic) -> None:
        # Daemon-thread loop: re-read the watched config files once a second.
        while True:
            mzk.sleep(1)
            application.system_config.reload_file()
            application.ip_blacklist.reload_file()
            application.api_key_config.reload_file()
            application.flags.reload_file()

    # daemon=True so this thread never blocks interpreter shutdown
    app_._timer_thread = Thread(target=__reload_timer, args=(app_, ), daemon=True)
    app_._timer_thread.start()
    app_.bots = {}

    def reload_bot(the_updated_key, old_value, new_value, *args, **kwargs) -> None:
        # Flag-change handler: (re)load every bot found under the storage
        # directory and rebuild the id<->name lookup caches from MySQL.
        # Signature matches what flags.add_handler invokes.
        application = kwargs['app']
        for bot_dir in core.STORAGE_DIR.iterdir():
            if bot_dir.is_dir():
                application.bots[bot_dir.name] = mzk.MocaBot(
                    bot_dir.name, bot_dir)
        con = application.mysql.get_a_new_con()
        cursor = con.cursor()
        cursor.execute(core.GET_BOTS_QUERY)
        res = cursor.fetchall()
        con.close()
        # two mirrored lookup dicts: id -> name and name -> id
        application.dict_cache['id'] = {}
        application.dict_cache['name'] = {}
        for info in res:
            application.dict_cache['id'][info[0]] = info[1]
            application.dict_cache['name'][info[1]] = info[0]

    # initial load, then re-run whenever the 'moca_bot_reload' flag changes
    reload_bot(None, None, None, app=app_)
    app_.flags.set('moca_bot_reload', False)
    app_.flags.add_handler('moca_bot_reload' + str(mzk.get_my_pid()),
                           'moca_bot_reload',
                           reload_bot,
                           kwargs={'app': app_})
def __init__(
    self,
    # app: Starlette = None,
    key_func: Callable[..., str],
    # NOTE(review): mutable defaults ([] / {}) below are shared across calls;
    # they appear to be read-only here, but confirm before relying on that.
    default_limits: List[StrOrCallableStr] = [],
    application_limits: List[StrOrCallableStr] = [],
    headers_enabled: bool = False,
    strategy: Optional[str] = None,
    storage_uri: Optional[str] = None,
    storage_options: Dict[str, str] = {},
    auto_check: bool = True,
    swallow_errors: bool = False,
    in_memory_fallback: List[StrOrCallableStr] = [],
    in_memory_fallback_enabled: bool = False,
    retry_after: Optional[str] = None,
    key_prefix: str = "",
    enabled: bool = True,
    config_filename: Optional[str] = None,
) -> None:
    """
    Configure the rate limiter at app level.

    Constructor arguments take precedence; anything not supplied is then
    resolved from the app config (an ``.env``-style file read via
    ``Config``) with sensible defaults as the last resort.
    """
    # assert app is not None, "Passing the app instance to the limiter is required"
    # self.app = app
    # app.state.limiter = self
    self.logger = logging.getLogger("slowapi")
    self.app_config = Config(
        config_filename if config_filename is not None else ".env")
    self.enabled = enabled
    self._default_limits = []
    self._application_limits = []
    self._in_memory_fallback: List[LimitGroup] = []
    # passing any fallback limits implicitly enables the fallback
    self._in_memory_fallback_enabled = (in_memory_fallback_enabled
                                        or len(in_memory_fallback) > 0)
    self._exempt_routes: Set[str] = set()
    self._request_filters: List[Callable[..., bool]] = []
    self._headers_enabled = headers_enabled
    self._header_mapping: Dict[int, str] = {}
    self._retry_after: Optional[str] = retry_after
    self._strategy = strategy
    self._storage_uri = storage_uri
    self._storage_options = storage_options
    self._auto_check = auto_check
    self._swallow_errors = swallow_errors
    self._key_func = key_func
    self._key_prefix = key_prefix
    # set() de-duplicates the caller-supplied default limits
    for limit in set(default_limits):
        self._default_limits.extend([
            LimitGroup(limit, self._key_func, None, False, None, None, None,
                       False)
        ])
    for limit in application_limits:
        self._application_limits.extend([
            LimitGroup(limit, self._key_func, "global", False, None, None,
                       None, False)
        ])
    for limit in in_memory_fallback:
        self._in_memory_fallback.extend([
            LimitGroup(limit, self._key_func, None, False, None, None, None,
                       False)
        ])
    self._route_limits: Dict[str, List[Limit]] = {}
    self._dynamic_route_limits: Dict[str, List[LimitGroup]] = {}
    # a flag to note if the storage backend is dead (not available)
    self._storage_dead: bool = False
    self._fallback_limiter = None
    self.__check_backend_count = 0
    self.__last_check_backend = time.time()
    self.__marked_for_limiting: Dict[str, List[Callable]] = {}

    # swallow our own log records unless the host app attaches real handlers
    class BlackHoleHandler(logging.StreamHandler):
        def emit(*_):
            return

    self.logger.addHandler(BlackHoleHandler())
    # From here on: constructor values win, then app config, then defaults.
    self.enabled = self.get_app_config(C.ENABLED, self.enabled)
    self._swallow_errors = self.get_app_config(C.SWALLOW_ERRORS,
                                               self._swallow_errors)
    self._headers_enabled = self._headers_enabled or self.get_app_config(
        C.HEADERS_ENABLED, False)
    self._storage_options.update(self.get_app_config(C.STORAGE_OPTIONS, {}))
    self._storage: Storage = storage_from_string(
        self._storage_uri
        or self.get_app_config(C.STORAGE_URL, "memory://"),
        **self._storage_options,
    )
    strategy = self._strategy or self.get_app_config(
        C.STRATEGY, "fixed-window")
    if strategy not in STRATEGIES:
        raise ConfigurationError("Invalid rate limiting strategy %s" %
                                 strategy)
    self._limiter: RateLimiter = STRATEGIES[strategy](self._storage)
    # header names already present in the mapping take precedence
    self._header_mapping.update({
        HEADERS.RESET:
        self._header_mapping.get(
            HEADERS.RESET,
            self.get_app_config(C.HEADER_RESET, "X-RateLimit-Reset"),
        ),
        HEADERS.REMAINING:
        self._header_mapping.get(
            HEADERS.REMAINING,
            self.get_app_config(C.HEADER_REMAINING, "X-RateLimit-Remaining"),
        ),
        HEADERS.LIMIT:
        self._header_mapping.get(
            HEADERS.LIMIT,
            self.get_app_config(C.HEADER_LIMIT, "X-RateLimit-Limit"),
        ),
        HEADERS.RETRY_AFTER:
        self._header_mapping.get(
            HEADERS.RETRY_AFTER,
            self.get_app_config(C.HEADER_RETRY_AFTER, "Retry-After"),
        ),
    })
    self._retry_after = self._retry_after or self.get_app_config(
        C.HEADER_RETRY_AFTER_VALUE)
    self._key_prefix = self._key_prefix or self.get_app_config(
        C.KEY_PREFIX)
    # config-declared limits only apply when none were passed to __init__
    app_limits: Optional[StrOrCallableStr] = self.get_app_config(
        C.APPLICATION_LIMITS, None)
    if not self._application_limits and app_limits:
        self._application_limits = [
            LimitGroup(app_limits, self._key_func, "global", False, None,
                       None, None, False)
        ]
    conf_limits: Optional[StrOrCallableStr] = self.get_app_config(
        C.DEFAULT_LIMITS, None)
    if not self._default_limits and conf_limits:
        self._default_limits = [
            LimitGroup(conf_limits, self._key_func, None, False, None, None,
                       None, False)
        ]
    fallback_enabled = self.get_app_config(C.IN_MEMORY_FALLBACK_ENABLED,
                                           False)
    fallback_limits: Optional[StrOrCallableStr] = self.get_app_config(
        C.IN_MEMORY_FALLBACK, None)
    if not self._in_memory_fallback and fallback_limits:
        self._in_memory_fallback = [
            LimitGroup(
                fallback_limits,
                self._key_func,
                None,
                False,
                None,
                None,
                None,
                False,
            )
        ]
    if not self._in_memory_fallback_enabled:
        self._in_memory_fallback_enabled = (
            fallback_enabled or len(self._in_memory_fallback) > 0)
    # process-local fallback keeps rate limiting alive when the primary
    # storage backend is unreachable
    if self._in_memory_fallback_enabled:
        self._fallback_storage = MemoryStorage()
        self._fallback_limiter = STRATEGIES[strategy](self._fallback_storage)
def setUp(self):
    # fresh in-memory backend for every test so state never leaks between cases
    self.storage = MemoryStorage()