def set_redis_revoked_task_pool():
    """Set redis connection pool for revoked tasks."""
    global REVOKED_TASK_POOL
    if REVOKED_TASK_POOL is None:
        REVOKED_TASK_POOL = BlockingConnectionPool.from_url(
            app.conf.REDIS_JOB_STATUS_URL)
def set_redis_socket_pool():
    """Set redis connection pool via Unix socket file."""
    global SOCKET_POOL
    if SOCKET_POOL is None:
        SOCKET_POOL = BlockingConnectionPool.from_url(
            app.conf.REDIS_UNIX_DOMAIN_SOCKET)
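# For reference: from_url also understands Unix-socket URLs, so the setting
# above is presumably of this shape (the socket path is illustrative):
#
#   BlockingConnectionPool.from_url('unix:///tmp/redis.sock')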
def main():
    import gevent.monkey

    gevent.monkey.patch_all()
    from gevent.queue import LifoQueue

    # These imports are inside the __main__ block
    # to make sure that we only import from rq_gevent_worker
    # (which has the side effect of applying gevent monkey patches)
    # in the worker process. This way other processes can import the
    # redis connection without that side effect.
    import os
    from redis import BlockingConnectionPool, StrictRedis
    from rq import Queue, Connection
    from dallinger.heroku.rq_gevent_worker import GeventWorker as Worker
    from dallinger.config import initialize_experiment_package

    initialize_experiment_package(os.getcwd())

    import logging
    logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)

    redis_url = os.getenv("REDIS_URL", "redis://localhost:6379")
    # Specify queue class for improved performance with gevent.
    # see http://carsonip.me/posts/10x-faster-python-gevent-redis-connection-pool/
    redis_pool = BlockingConnectionPool.from_url(redis_url, queue_class=LifoQueue)
    redis_conn = StrictRedis(connection_pool=redis_pool)
    with Connection(redis_conn):
        worker = Worker(list(map(Queue, listen)))
        worker.work()
def set_redis_job_status_pool():
    """Set redis connection pool for job status."""
    global JOB_STATUS_POOL
    if JOB_STATUS_POOL is None:
        JOB_STATUS_POOL = BlockingConnectionPool.from_url(
            app.conf.REDIS_JOB_STATUS_URL)
def set_redis_event_status_pool():
    """Set redis connection pool for event status."""
    global EVENT_STATUS_POOL
    if EVENT_STATUS_POOL is None:
        EVENT_STATUS_POOL = BlockingConnectionPool.from_url(
            app.conf.REDIS_JOB_STATUS_URL)
def set_redis_job_info_pool():
    """Set redis connection pool for job info metrics."""
    global JOB_INFO_POOL
    if JOB_INFO_POOL is None:
        JOB_INFO_POOL = BlockingConnectionPool.from_url(
            app.conf.REDIS_JOB_INFO_URL)
def create_app(config='app.configs.DevConfig', redis=None):
    app = Flask(__name__)
    app.config.from_object(config)

    for i in namespaces:
        api.add_namespace(i)

    api_bp = Blueprint('api_blueprint', __name__)
    api.init_app(api_bp)

    db.init_app(app)
    cors.init_app(app)
    jwt.init_app(app)
    mail.init_app(app)
    migrate.init_app(app, db)
    init_celery(app)

    redis = redis or Redis(
        connection_pool=BlockingConnectionPool.from_url(
            app.config['REDIS_URL'], decode_responses=True))
    refresh_tokens.init_app(redis, ttl=app.config['JWT_REFRESH_TOKEN_EXPIRES'])
    email_confirm_tokens.init_app(redis, ttl=app.config['JWT_ACCESS_TOKEN_EXPIRES'])
    change_password_tokens.init_app(redis, ttl=app.config['JWT_ACCESS_TOKEN_EXPIRES'])
    access_tokens_blacklist.init_app(redis, ttl=app.config['JWT_ACCESS_TOKEN_EXPIRES'])

    app.register_blueprint(api_bp, url_prefix=app.config['API_PREFIX'])
    register_jwt(jwt)

    for c in commands:
        app.cli.add_command(c)

    return app
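# Hedged usage sketch for the factory above. The fakeredis substitution is an
# assumption (handy in tests, since passing `redis` bypasses pool creation):
#
#   import fakeredis
#   app = create_app('app.configs.DevConfig',
#                    redis=fakeredis.FakeRedis(decode_responses=True))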
def set_redis_worker_status_pool():
    """Set redis connection pool for worker status."""
    global WORKER_STATUS_POOL
    if WORKER_STATUS_POOL is None:
        WORKER_STATUS_POOL = BlockingConnectionPool.from_url(
            app.conf.REDIS_JOB_STATUS_URL)
def __init__(self, cfg):
    self.simhash_size = cfg['simhash']['size']
    self.simhash_expire = cfg['simhash']['expire_after']
    headers = {
        'User-Agent': 'wayback-discover-diff',
        'Accept-Encoding': 'gzip,deflate',
        'Connection': 'keep-alive'
    }
    cdx_auth_token = cfg.get('cdx_auth_token')
    if cdx_auth_token:
        # Add the auth cookie to the existing headers instead of replacing
        # them wholesale, which would silently drop the User-Agent.
        headers['cookie'] = 'cdx_auth_token=%s' % cdx_auth_token
    self.http = urllib3.HTTPConnectionPool(
        'web.archive.org', maxsize=50,
        retries=urllib3.Retry(3, redirect=2),
        headers=headers)
    self.redis_db = StrictRedis(
        connection_pool=BlockingConnectionPool.from_url(
            cfg['redis_uri'], max_connections=50,
            timeout=cfg.get('redis_timeout', 10),
            decode_responses=True))
    self.tpool = ThreadPoolExecutor(max_workers=cfg['threads'])
    self.snapshots_number = cfg['snapshots']['number_per_year']
    # Initialize logger
    self._log = logging.getLogger('wayback_discover_diff.worker')
def main(host, port, database):
    maybe_enable_rollbar()

    connection_pool = BlockingConnectionPool(
        host=host,
        port=port,
        db=database,
        max_connections=MAX_CONNECTIONS,
        timeout=WAIT_TIMEOUT,
        socket_timeout=SOCKET_TIMEOUT,
    )
    # The pool already carries host/port/db, so the client needs nothing else.
    client = StrictRedis(connection_pool=connection_pool)
    batch_client = client.pipeline()

    count = 0
    for name in client.scan_iter(count=100):
        # Keys arrive as bytes because decode_responses is not enabled.
        if name == b"ElastiCacheMasterReplicationTimestamp":
            continue
        batch_client.delete(name)
        count += 1
    batch_client.execute()

    print("{} heartbeats deleted!".format(count))
    exit(0)
def _red():
    # poor man's singleton: build the pool once, then hand every caller a
    # client backed by the same shared pool.
    global RED_POOL
    if not RED_POOL:
        RED_POOL = BlockingConnectionPool(max_connections=4)
    return Redis(connection_pool=RED_POOL)
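# Self-contained sketch of the lazy-singleton pattern above: every caller gets
# a Redis facade backed by one shared BlockingConnectionPool, so at most
# max_connections sockets exist process-wide. Names here are illustrative,
# not part of the original module.
from redis import BlockingConnectionPool, Redis

_DEMO_POOL = None

def demo_red():
    global _DEMO_POOL
    if not _DEMO_POOL:
        _DEMO_POOL = BlockingConnectionPool(max_connections=4)
    return Redis(connection_pool=_DEMO_POOL)

assert demo_red().connection_pool is demo_red().connection_pool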
def __init__(self, name, **kwargs):
    """Initialize the instance.

    :param name: name for this instance
    :param kwargs: options forwarded to BlockingConnectionPool
    """
    self.name = name
    self.__conn = Redis(connection_pool=BlockingConnectionPool(**kwargs))
def __init__(self, **kwargs):
    from redis import Redis

    host = kwargs.get('host', 'localhost')
    port = kwargs.get('port', 6379)
    password = kwargs.get('password')
    encoding = kwargs.get('encoding', 'utf-8')
    db = kwargs.get('db', 0)
    maxconn = kwargs.get('maxconn', 64)

    poolParam = {
        'db': db,
        'password': password,
        'socket_timeout': None,
        'encoding': encoding,
        'encoding_errors': 'strict',
        'decode_responses': False,
        'retry_on_timeout': False,
        'max_connections': maxconn,
        'host': host,
        'port': port,
        'socket_connect_timeout': None,
        'socket_keepalive': True,
        'socket_keepalive_options': None,
    }
    if kwargs.get('safe'):
        # Block and wait for a free connection when the pool is exhausted.
        from redis import BlockingConnectionPool
        connectionPool = BlockingConnectionPool(**poolParam)
    else:
        # Fail fast with ConnectionError when the pool is exhausted.
        from redis import ConnectionPool
        connectionPool = ConnectionPool(**poolParam)
    # The pool already carries host/port/db/password, so the client needs
    # only the pool itself.
    self.server = Redis(connection_pool=connectionPool)
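# Self-contained sketch of what the 'safe' flag above trades off. With a plain
# ConnectionPool, exceeding max_connections raises ConnectionError at once;
# a BlockingConnectionPool would instead block up to `timeout` seconds waiting
# for a connection to be released. Assumes a Redis server on localhost:6379.
from redis import ConnectionPool
from redis.exceptions import ConnectionError

demo_pool = ConnectionPool(max_connections=1)
held = demo_pool.get_connection('PING')   # occupies the only slot
try:
    demo_pool.get_connection('PING')      # fails fast: "Too many connections"
except ConnectionError as exc:
    print(exc)
demo_pool.release(held)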
def __init__(self, redis_uri: str = "redis://localhost:6379/1", max_connections=20):
    self.__connection_pool = BlockingConnectionPool.from_url(
        redis_uri, max_connections=max_connections)
    self.__encoder = WormholePickleEncoder()
    self.__closed = False
    self.__send_rate = -1
    self.__receive_rate = -1
    self.stats_enabled = True
def __init__(self, host: str, port: int, password: Optional[str]):
    """Initialize the redis locker with the provided redis configuration.

    Args:
        host: redis service host address
        port: redis service port
        password: redis service access password
    """
    self._client = Redis(connection_pool=BlockingConnectionPool(
        host=host, port=port, password=password))
def __create_connection_pool__(self) -> BlockingConnectionPool:
    # BlockingConnectionPool expects a connection *class* plus the keyword
    # arguments it should use to build connections; assigning a ready-made
    # Connection instance to pool.connection_class breaks the pool, which
    # calls connection_class(**connection_kwargs) internally.
    return BlockingConnectionPool(
        connection_class=Connection,
        host=os.environ.get('REDIS_HOST'),
        port=os.environ.get('REDIS_PORT', 6379),
        db=self.config.get('REDIS_DB', 0),
        username=self.config.get('REDIS_USERNAME', None),
        password=os.environ.get('REDIS_PASSWORD', None),
        socket_timeout=BaseConfig.REDIS_SOCKET_TIMEOUT,
        socket_connect_timeout=BaseConfig.REDIS_SOCKET_CONNECT_TIMEOUT,
        socket_keepalive=BaseConfig.REDIS_SOCKET_KEEP_ALIVE,
        health_check_interval=BaseConfig.REDIS_HEALTH_CHECK_INTERVAL,
        client_name=BaseConfig.REDIS_CLIENT_NAME,
        encoding='utf-8',
        decode_responses=True,
        timeout=BaseConfig.REDIS_CONNECTION_TIMEOUT,
        max_connections=BaseConfig.REDIS_MAX_CONNECTION)
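# A minimal sketch of consuming a pool built with an explicit connection_class,
# mirroring the fixed factory above (host/port here are illustrative):
from redis import BlockingConnectionPool, Connection, Redis

sketch_pool = BlockingConnectionPool(
    connection_class=Connection, host='localhost', port=6379,
    decode_responses=True, timeout=20, max_connections=10)
sketch_client = Redis(connection_pool=sketch_pool)
# Connections are created lazily via connection_class(**connection_kwargs)
# on the first command, e.g. sketch_client.ping().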
def __init__(self, startup_nodes):
    self.nodemanager = NodeManager(startup_nodes=startup_nodes)
    self.nodemanager.initialize()
    self.redis_worker = {}
    for node, config in self.nodemanager.nodes.items():
        rdp = BlockingConnectionPool(host=config["host"], port=config["port"])
        self.redis_worker[node] = {
            "worker": StrictRedis(connection_pool=rdp, decode_responses=False),
            "type": config["server_type"]
        }
def alloc(self, node, max_connections=2**16, timeout=20, db=0):
    host = node.split(':')[0]
    port = int(node.split(':')[1])
    pool = BlockingConnectionPool(
        max_connections=max_connections,
        timeout=timeout,
        host=host,
        port=port,
        db=db)
    return RedisCache(pool)
def __init__(self, custom_settings=None):
    if self.pool is None and custom_settings is None:
        raise Exception(
            'Trying to use uninitialized RedisConnection class')
    settings = dict(
        decode_responses=True,
        retry_on_timeout=True,
    )
    # Guard against custom_settings being None when a pool already exists;
    # dict.update(None) would raise TypeError.
    settings.update(custom_settings or {})
    self.pool = BlockingConnectionPool(**settings)
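# Sketch of the intended call pattern (assumes `pool` is a class-level
# attribute defaulting to None on this RedisConnection-style class):
#
#   RedisConnection(custom_settings={'host': 'localhost', 'port': 6379})
#   RedisConnection()   # later: the class-level pool is already initialized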
def notify_write(type, ip, appname, state, output):
    global REDIS_HOST, REDIS_PORT, REDIS_DB_NUM, REDIS_PASSWORD
    pool = BlockingConnectionPool(
        max_connections=1, timeout=5, socket_timeout=5,
        host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB_NUM,
        password=REDIS_PASSWORD)
    redis_db = redis.StrictRedis(connection_pool=pool)
    logger = logging.getLogger()

    dd = {}
    dd['time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # nagios
    dd['host'] = redis_db.hget('ip_host', ip)
    dd['appname'] = appname
    dd['type'] = type
    dd['state'] = state
    dd['information'] = output
    message = json.dumps(dd)
    redis_db.rpush('notification', message)
    pool.disconnect()
    # Log the queued notification.
    logger.info(u'notification recorded: %s', message)
def __init__(self, host, port, db=0):
    """Initialize the Redis connection and the ID key names.

    :param host: host
    :param port: port
    """
    self.origin_ids = 'movieID'
    self.fail_ids = 'failID'
    self.succ_ids = 'succID'
    self.extracted_ids = 'extractedID'
    self.conn = Redis(connection_pool=BlockingConnectionPool(
        host=host, port=port, db=db))
def __get_redis_client(self) -> RedisCluster:
    # NOTE: despite the annotation, the single-node branch returns a plain
    # Redis client rather than a RedisCluster.
    if not self.__redis_client:
        if len(startup_nodes) > 1:
            self.__redis_client = RedisCluster(
                startup_nodes=startup_nodes,
                decode_responses=False,
                password=server_config.redis_server_password)
        else:
            node = startup_nodes[0]
            host = node["host"]
            port = node["port"]
            self.__redis_client = Redis(connection_pool=BlockingConnectionPool(
                host=host,
                port=port,
                password=server_config.redis_server_password,
                db=release_cache_db_index))
    return self.__redis_client
def LedisFactory(settings):
    from ledis import BlockingConnectionPool
    from walrus.tusks.ledisdb import WalrusLedis

    default = get_settings('pyramid_walrus.backend.ledis.', settings, {
        'url': 'ledis://@localhost:6380/0',
        'max_connections': '10',
    })
    connection_url = default['url']
    max_connection = default['max_connections']
    max_connection = int(max_connection) if max_connection else None
    connection_pool = BlockingConnectionPool.from_url(
        url=connection_url, max_connections=max_connection)
    logger.debug('connection_pool: %r', connection_pool)

    def get_database_for_request(request):
        return WalrusLedis(connection_pool=connection_pool)
    return get_database_for_request
def _get_connection_pool(host, port, db):
    # get_redis_client() is called once per sync process at the time of
    # instantiating the singleton HeartBeatStore, so doing this here
    # should be okay for now.
    # TODO[k]: Refactor.
    global connection_pool_map
    connection_pool = connection_pool_map.get(db)
    if connection_pool is None:
        connection_pool = BlockingConnectionPool(
            host=host, port=port, db=db,
            max_connections=MAX_CONNECTIONS,
            timeout=WAIT_TIMEOUT,
            socket_timeout=SOCKET_TIMEOUT)
        connection_pool_map[db] = connection_pool
    return connection_pool
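# Self-contained sketch of the per-db memoization above: repeated calls with
# the same db return the identical pool object, so each process keeps exactly
# one pool per logical database. (Constants are illustrative stand-ins for
# MAX_CONNECTIONS and the timeouts.)
from redis import BlockingConnectionPool

demo_pool_map = {}

def demo_get_pool(host, port, db):
    pool = demo_pool_map.get(db)
    if pool is None:
        pool = BlockingConnectionPool(host=host, port=port, db=db,
                                      max_connections=20, timeout=30)
        demo_pool_map[db] = pool
    return pool

assert demo_get_pool('localhost', 6379, 0) is demo_get_pool('localhost', 6379, 0)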
def make_redis_region(app, prefix):
    expiration_time = app.config.setdefault('REDICA_DEFAULT_EXPIRE', 3600)
    key_mangler = functools.partial(_md5_key_mangler, prefix)
    redica_cache_url = app.config.get('REDICA_CACHE_URL')
    cfg = {
        'backend': 'extended_redis_backend',
        'expiration_time': expiration_time,
        'arguments': {
            'redis_expiration_time': expiration_time + 30,
            'key_mangler': key_mangler,
        }
    }
    if app.config.get('REDICA_CACHE_POOL_BLOCKING', True):
        cfg['arguments']['connection_pool'] = BlockingConnectionPool.from_url(
            redica_cache_url)
    else:
        cfg['arguments']['url'] = redica_cache_url
    return dict(default=make_region().configure(**cfg))
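# Hedged usage sketch for the region factory above (standard dogpile.cache
# decorator API; the Flask `app` object and prefix are assumptions):
#
#   regions = make_redis_region(app, 'myapp')
#
#   @regions['default'].cache_on_arguments()
#   def expensive_lookup(key):
#       ...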
def main():
    import gevent.monkey

    gevent.monkey.patch_all()
    from gevent.queue import LifoQueue

    # These imports are inside the __main__ block
    # to make sure that we only import from rq_gevent_worker
    # (which has the side effect of applying gevent monkey patches)
    # in the worker process. This way other processes can import the
    # redis connection without that side effect.
    import logging
    import os

    from redis import BlockingConnectionPool, StrictRedis
    from rq import Queue, Connection
    from six.moves.urllib.parse import urlparse

    from dallinger.heroku.rq_gevent_worker import GeventWorker as Worker
    from dallinger.config import initialize_experiment_package

    initialize_experiment_package(os.getcwd())

    logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)

    redis_url = os.getenv("REDIS_URL", "redis://localhost:6379")
    # Specify queue class for improved performance with gevent.
    # see http://carsonip.me/posts/10x-faster-python-gevent-redis-connection-pool/
    connection_args = {
        "url": redis_url,
        "queue_class": LifoQueue,
    }
    # Since we are generally running on Heroku, and configuring SSL certificates
    # is challenging, we disable cert requirements on secure connections.
    if urlparse(redis_url).scheme == "rediss":
        connection_args["ssl_cert_reqs"] = None
    redis_pool = BlockingConnectionPool.from_url(**connection_args)
    redis_conn = StrictRedis(connection_pool=redis_pool)
    with Connection(redis_conn):
        worker = Worker(list(map(Queue, listen)))
        worker.work()
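# Self-contained sketch (separate from the worker above) isolating the LIFO
# pool choice: BlockingConnectionPool keeps idle connections in a queue, and
# substituting gevent's LifoQueue reuses the most recently released connection
# first, which the post linked above reports is much faster under gevent.
# Assumes a Redis server at the default local address.
import gevent.monkey
gevent.monkey.patch_all()
from gevent.queue import LifoQueue
from redis import BlockingConnectionPool, StrictRedis

demo_pool = BlockingConnectionPool.from_url(
    "redis://localhost:6379", queue_class=LifoQueue)
StrictRedis(connection_pool=demo_pool).ping()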
def includeme(config):
    """Factory for Redis pyramid client."""
    dsn = config.registry.settings.get('redis.dsn')
    max_connections = config.registry.settings.get('redis.max_connections', 4)
    redis_client = None
    is_type = lambda name: dsn.startswith(name + '://')

    if any(is_type(t) for t in ['redis', 'rediss', 'unix']):
        from redis import StrictRedis
        redis_client = StrictRedis.from_url(dsn)
    elif any(is_type(t + '+blocking') for t in ['redis', 'rediss', 'unix']):
        from redis import BlockingConnectionPool
        from redis import StrictRedis

        # Strip the +blocking from the dsn; the underlying client
        # does not support this scheme.
        dsn = dsn.replace('+blocking:', ':')
        pool = BlockingConnectionPool.from_url(dsn, max_connections=max_connections)
        redis_client = StrictRedis(connection_pool=pool)
    elif is_type('fakeredis'):
        import fakeredis

        server = fakeredis.FakeServer()
        redis_client = fakeredis.FakeStrictRedis(server=server)
        config.registry.settings["fakeredis_server"] = server
    else:
        logger.error(
            'Redis could not be initialized, DSN %s is not supported!', dsn)

    # Create a request method that'll get the Redis client in each request.
    config.add_request_method(
        lambda request: redis_client,
        name='redis',
        reify=True,
    )
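# Hedged examples of `redis.dsn` settings the factory above dispatches on
# (hosts and ports illustrative):
#
#   redis.dsn = redis://localhost:6379/0           -> plain StrictRedis
#   redis.dsn = redis+blocking://localhost:6379/0  -> StrictRedis over a BlockingConnectionPool
#   redis.dsn = fakeredis://                       -> in-memory fake for tests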
app.jinja_env.cache = {}

app.config["UserAgent"] = f"Ruqqus webserver tools for Ruqqus v{_version} developed by Ruqqus LLC for ruqqus.com."

if "localhost" in app.config["SERVER_NAME"]:
    app.config["CACHE_TYPE"] = "null"
else:
    app.config["CACHE_TYPE"] = "redis"

app.config["CACHE_REDIS_URL"] = environ.get("REDIS_URL")
app.config["CACHE_DEFAULT_TIMEOUT"] = 60
app.config["CACHE_KEY_PREFIX"] = "flask_caching_"

MAX_REDIS_CONNS = int(environ.get("MAX_REDIS_CONNS", 6))
pool = BlockingConnectionPool(max_connections=MAX_REDIS_CONNS)
app.config['CACHE_OPTIONS'] = {
    'connection_pool': pool,
    'max_connections': MAX_REDIS_CONNS}

Markdown(app)
cache = Cache(app)
Compress(app)

app.config["RATELIMIT_STORAGE_URL"] = app.config["CACHE_REDIS_URL"]
app.config["RATELIMIT_KEY_PREFIX"] = "flask_limiting_"

limiter = Limiter(
    app,
    key_func=get_remote_address,
    default_limits=["100/minute"],
"""Redis-based caching of per-Principal per-app access policy.""" import contextlib import json import logging import pickle from django.conf import settings from redis import BlockingConnectionPool, exceptions from redis.client import Redis logger = logging.getLogger(__name__) # pylint: disable=invalid-name _connection_pool = BlockingConnectionPool( max_connections=10, **settings.REDIS_CACHE_CONNECTION_PARAMS # should match gunicorn.threads ) class BasicCache: """Basic cache class to be inherited.""" def __init__(self): """Init the class.""" self._connection = None @property def connection(self): """Get Redis connection from the pool.""" if not self._connection: self._connection = Redis(connection_pool=_connection_pool) try: self._connection.ping()
def __init__(self):
    self.redisClient = Redis(connection_pool=BlockingConnectionPool())
logging.config.dictConfig(logconf)

# Init Celery app
CELERY = Celery(config_source=CFG['celery'])
CELERY.register_task(Discover(CFG))

# Init Flask app
from . import web
APP = web.get_app(CFG)

# Initialize CORS support
cors = CFG.get('cors')
if cors:
    CORS(APP, origins=cors)

# Initialize Celery and Redis
APP.celery = CELERY
APP.redis_db = StrictRedis(
    connection_pool=BlockingConnectionPool.from_url(
        CFG['redis_uri'],
        max_connections=50,
        timeout=CFG.get('redis_timeout', 10),
        decode_responses=True
    )
)

# ensure the instance folder exists
try:
    os.makedirs(APP.instance_path)
except OSError:
    pass
import os
import pickle

import requests
from bs4 import BeautifulSoup
from huey import RedisHuey, crontab
from pushbullet import Pushbullet
from redis import StrictRedis, BlockingConnectionPool

url = os.getenv('URL', 'https://shop.lego.com/en-CH/Millennium-Falcon-75192')
redis_host = os.getenv('REDIS_HOST', 'localhost')
redis_port = os.getenv('REDIS_PORT', 6379)
redis_db = os.getenv('REDIS_DB', 0)
pushbullet_api_key = os.getenv('PUSHBULLET_API_KEY', '')

pool = BlockingConnectionPool(host=redis_host, port=redis_port, db=redis_db)
redis = StrictRedis(connection_pool=pool)
huey = RedisHuey('millennium-falcon-checker', connection_pool=pool)
pb = Pushbullet(pushbullet_api_key)


@huey.periodic_task(crontab(minute='*/30'))
def check_status_task():
    check_status()


def check_status():
    millennium_falcon_response = requests.get(url)
    try:
        millennium_falcon_response.raise_for_status()
        soup = BeautifulSoup(millennium_falcon_response.content, 'html.parser')