def get_redlock_factory(config: Config) -> RedLockFactory:
    """Build a RedLock factory pointed at the configured Redis server.

    Locks are always taken against db 1 on that server.
    """
    node = {
        'host': config.redis_host,
        'port': config.redis_port,
        'password': config.redis_password,
        'db': 1,
    }
    return RedLockFactory(connection_details=[node])
def test_factory_create():
    """Locks created by a factory inherit its nodes, quorum and settings."""
    factory = RedLockFactory([{"host": "localhost"}])
    lock = factory.create_lock(
        "test_factory_create", ttl=500, retry_times=5, retry_delay=100
    )
    # Connection state is shared between the factory and its locks.
    assert lock.redis_nodes == factory.redis_nodes
    assert lock.quorum == factory.quorum
    # The per-lock options passed to create_lock are honored.
    assert (lock.ttl, lock.retry_times, lock.retry_delay) == (500, 5, 100)
    assert lock.factory == factory
def _redlock_factory(config):
    """Create a RedLockFactory from the USER_EVENTS_REDIS config section.

    Copies the config so the original mapping is not mutated, then pins
    short socket timeouts and a single-connection client.
    """
    node = dict(config["USER_EVENTS_REDIS"])
    node["socket_connect_timeout"] = 5
    node["socket_timeout"] = 5
    node["single_connection_client"] = True
    return RedLockFactory(connection_details=[node])
def test_factory_create_from_url():
    """A URL-configured factory passes its state on to created locks."""
    factory = RedLockFactory([{"url": "redis://localhost/0"}])
    lock = factory.create_lock(
        "test_factory_create_from_url", ttl=500, retry_times=5, retry_delay=100
    )
    # Connection state is shared between the factory and its locks.
    assert lock.redis_nodes == factory.redis_nodes
    assert lock.quorum == factory.quorum
    # The per-lock options passed to create_lock are honored.
    assert (lock.ttl, lock.retry_times, lock.retry_delay) == (500, 5, 100)
    assert lock.factory == factory
def main():
    """Parse CLI options, configure logging, and crawl the qremis API,
    running fixity checks on objects whose last check is too old."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--qremis_api_url", help="The URL of the qremis API",
                        required=True, type=str)
    parser.add_argument("--archstor_api_url", help="The URL of the archstor API",
                        required=True, type=str)
    # TODO: Figure out how to parse all the address details
    # out of these args
    parser.add_argument("--locking_server", help="The addresses of the redis " +
                        "locking servers", required=True, type=str,
                        action='append')
    parser.add_argument("--delay", help="The delay between actions",
                        type=float, default=.1)
    parser.add_argument("-v", "--verbosity", help="Logging verbosity",
                        type=str, default="WARN")
    # BUG FIX: the concatenated help text rendered as "...eventcan have
    # occured..." — missing space at the join and misspelled "occurred".
    parser.add_argument("--fixity_max_age",
                        help="How long ago a fixity event " +
                        "can have occurred before we run another - in seconds",
                        type=int, default=60 * 60 * 24 * 7 * 4)
    args = parser.parse_args()

    logging.basicConfig(level=args.verbosity)
    # urllib3 is chatty at INFO/DEBUG; keep it quiet regardless of -v.
    logging.getLogger("urllib3").setLevel("WARN")

    fixity_check_cb = partial(fixity_check, args.archstor_api_url)
    filter_cb = partial(no_fixity_for, args.fixity_max_age)
    # One redis node per --locking_server occurrence (flag is appendable).
    fac = RedLockFactory(connection_details=[{"host": x}
                                             for x in args.locking_server])
    spider = QremisApiSpider(args.qremis_api_url, filter_cb,
                             fixity_check_cb, fac)
    spider.crawl(delay=args.delay)
def __init__(self, tenant=None, service_name=None, redis_server=None):
    """Set up a tenant-scoped Redis connection plus a RedLock factory.

    The Redis server name is resolved through a fallback chain:
    explicit ``redis_server`` argument -> app config ``redis_server``
    -> ``g.driftenv_objects["redis_server"]`` -> the ``host`` entry of
    ``redis_connection_info``. If the connection info marks Redis as
    disabled, setup stops early and no connection is made.
    """
    conn_info = current_app.config.get("redis_connection_info")
    if not redis_server:
        redis_server = current_app.config.get("redis_server", None)
    # Disabled flag short-circuits all setup; the instance stays unusable.
    self.disabled = conn_info.get("disabled", False)
    if self.disabled:
        log.warning("Redis is disabled!")
        return
    if not redis_server:
        # Broad except: any failure reading driftenv falls back to the
        # host from the connection info (logged at INFO).
        try:
            redis_server = g.driftenv_objects["redis_server"]
        except Exception:
            log.info(
                "'redis_server' not found in config. Using default server '%s'",
                conn_info["host"])
            redis_server = conn_info["host"]
    # Tenant/service default to the current drift environment and app name.
    self.tenant = tenant or g.driftenv["name"]
    self.service_name = service_name or current_app.config["name"]
    self.host = redis_server
    self.port = conn_info["port"]
    self.conn = redis.StrictRedis(
        host=self.host,
        port=self.port,
        socket_timeout=conn_info.get("socket_timeout", 5),
        socket_connect_timeout=conn_info.get("socket_connect_timeout", 5),
        db=REDIS_DB,
    )
    # All keys are namespaced as "<tenant>.<service>:".
    self.key_prefix = "{}.{}:".format(self.tenant, self.service_name)
    # Distributed locks use the same host/port/db as the cache connection.
    self.redlock_factory = RedLockFactory(connection_details=[{
        'host': self.host,
        'port': self.port,
        'db': REDIS_DB,
    }], )
    log.debug("RedisCache initialized. self.conn = %s", self.conn)
def __init__(self, tenant, service_name, redis_config):
    """Connect to Redis for a tenant/service pair and prepare a RedLock
    factory sharing the same node and database.

    If ``redis_config`` marks Redis as disabled, setup stops early and
    no connection is made.
    """
    self.disabled = redis_config.get("disabled", False)
    if self.disabled:
        log.warning("Redis is disabled!")
        return

    self.tenant = tenant
    self.service_name = service_name
    self.host = redis_config["host"]
    self.port = redis_config["port"]

    # Local development override: talk to a Redis on localhost instead.
    if os.environ.get('DRIFT_USE_LOCAL_SERVERS', False):
        self.host = 'localhost'

    db_number = redis_config.get("db_number", REDIS_DB)
    self.conn = redis.StrictRedis(
        host=self.host,
        port=self.port,
        socket_timeout=redis_config.get("socket_timeout", 5),
        socket_connect_timeout=redis_config.get("socket_connect_timeout", 5),
        db=db_number,
    )

    # All keys are namespaced as "<tenant>.<service>:".
    self.key_prefix = "{}.{}:".format(self.tenant, self.service_name)

    # Distributed locks go through the same Redis node and database.
    self.redlock_factory = RedLockFactory(
        connection_details=[{
            'host': self.host,
            'port': self.port,
            'db': db_number,
        }],
    )
    log.debug("RedisCache initialized. self.conn = %s", self.conn)
from django.conf import settings

from redlock import RedLockFactory

# Single shared factory guarding contest-join operations; connection
# details come from Django settings, locks live in db 0.
_connection = {
    'host': settings.REDLOCK_REDIS_SERVER,
    'port': settings.REDLOCK_REDIS_PORT,
    'password': settings.REDLOCK_REDIS_PAS,
    'db': 0,
}
join_contest_lock = RedLockFactory(connection_details=[_connection])
def get_redlock():
    """Return a RedLockFactory for the Redis named in Django settings.

    Falls back to a local Redis when REDIS_LOCATION is not configured.
    """
    default_location = '127.0.0.1:6379'
    redis_url = getattr(settings, 'REDIS_LOCATION', default_location)
    return RedLockFactory([{'url': redis_url}])
def __init__(self, nodes):
    """Store the node list and build the Redis pool and lock factory.

    ``nodes`` is a non-empty sequence of dicts with at least 'host' and
    'port' keys. The plain connection pool targets the first node only,
    while distributed locks span every configured node.

    Raises ValueError if ``nodes`` is empty.
    """
    # BUG FIX: was `assert len(nodes) > 0` — asserts are stripped under
    # `python -O`, silently letting an empty node list through.
    if not nodes:
        raise ValueError("at least one redis node is required")
    self.__nodes = nodes
    # Ordinary commands use a pool against the first node.
    self.__redis_pool = ConnectionPool(host=nodes[0]['host'],
                                       port=nodes[0]['port'])
    # Locks must reach quorum across all nodes.
    self.__redis_lock = RedLockFactory(connection_details=nodes)
"""
Represents our shared scheduler and distributed locks

The scheduler is backed by redis, meaning that jobs can be restored
after a restart
"""
from apscheduler.schedulers.asyncio import AsyncIOScheduler

from redlock import RedLockFactory

__all__ = ["redlocks", "scheduler"]

# Locks are taken against the local Redis instance.
redlocks = RedLockFactory([{"host": "127.0.0.1"}])

# Persist jobs in Redis so they survive process restarts.
scheduler = AsyncIOScheduler()
scheduler.add_jobstore("redis")
scheduler.start()
'var058' :np.float64, 'var059' :np.float64, 'var060' :np.float64, 'var061' :np.float64, 'var062' :np.float64, 'var063' :np.float64, 'var064' :np.float64, 'var065' :np.float64, 'var067' :np.float64, 'var068' :np.float64, } from redlock import RedLockFactory factory = RedLockFactory( connection_details=[ {'host': '10.224.38.31', 'port': 8690, 'db': 13, 'password':redis_pass}, #{'host': '10.224.38.43','port': 8690, 'db': 13, 'password':redis_pass }, ]) try: from core.config_local import * except Exception as e: logger.exception(e) logger.debug("There is no local config")
import redis

from redlock import RedLockFactory

# Default local client (redis-py defaults: localhost:6379, db 0).
r = redis.Redis()
# NOTE(review): connection_details usually takes dicts of redis kwargs;
# passing a pre-built client instance relies on the installed redlock
# version accepting connection objects — confirm against the library in use.
redlock_factory = RedLockFactory(connection_details=[r])
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp, user):
    """Install mangle-table TPROXY iptables rules routing the given
    subnets (and optionally DNS/UDP) through sshuttle on ``port``.

    The PREROUTING insertion is serialized across instances via a Redis
    RedLock so concurrent instances load-balance with a statistic rule
    instead of clobbering each other.

    Raises Fatal if REDIS_HOST/REDIS_PORT are unset, and Exception if
    the family is unsupported or the lock cannot be acquired.
    """
    if family not in [socket.AF_INET, socket.AF_INET6]:
        raise Exception(
            'Address family "%s" unsupported by tproxy method'
            % family_to_string(family))
    table = "mangle"

    # Helpers that curry the family/table into the raw ipt calls.
    def _ipt(*args):
        return ipt(family, table, *args)

    def _ipt_ttl(*args):  # NOTE(review): currently unused here
        return ipt_ttl(family, table, *args)

    def _ipt_proto_ports(proto, fport, lport):
        # Append a --dport range only when a first port was given.
        return proto + ('--dport', '%d:%d' % (fport, lport)) \
            if fport else proto

    mark_chain = 'sshuttle-m-%s' % port
    tproxy_chain = 'sshuttle-t-%s' % port
    divert_chain = 'sshuttle-d-%s' % port

    # basic cleanup/setup of chains
    self.restore_firewall(port, family, udp, user)
    _ipt('-N', mark_chain)
    _ipt('-F', mark_chain)
    _ipt('-N', divert_chain)
    _ipt('-F', divert_chain)
    _ipt('-N', tproxy_chain)
    _ipt('-F', tproxy_chain)

    if (REDIS_HOST is None or REDIS_PORT is None):
        raise Fatal(
            "REDIS_HOST and REDIS_PORT environment variables must both be set!"
        )
    redlockFactory = RedLockFactory([{
        "host": REDIS_HOST,
        "port": REDIS_PORT
    }])
    lock = redlockFactory.create_lock(
        "SSHUTTLE_TPROXY_INSTANCE_CREATION_LOCK",
        ttl=500, retry_times=5, retry_delay=100)
    locked = lock.acquire()
    # FIX: idiomatic truth test instead of `locked == True`.
    if locked:
        rule_count = ipt_rule_count(family, 'mangle', 'PREROUTING')
        if rule_count == 0:
            # First instance: plain jump into our chain.
            _ipt('-I', 'PREROUTING', '1', '-j', tproxy_chain)
        else:
            # Later instances: statistically share packets 1-in-N.
            global iptables_lb_every
            iptables_lb_every = str(rule_count + 1)
            _ipt('-I', 'PREROUTING', '1', '-m', 'statistic',
                 '--mode', 'nth', '--every', iptables_lb_every,
                 '--packet', '0', '-j', tproxy_chain)
        lock.release()
    else:
        lock.release()
        raise Exception('Failed to acquire lock to edit iptable')

    _ipt('-I', 'OUTPUT', '1', '-j', mark_chain)
    _ipt('-A', divert_chain, '-j', 'MARK', '--set-mark', '1')
    _ipt('-A', divert_chain, '-j', 'ACCEPT')
    # Allow localhost to localhost traffic to bypass sshuttle
    _ipt('-A', tproxy_chain, '-j', 'RETURN', '--src', '127.0.0.1/32',
         '--dest', '127.0.0.1/32')
    _ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
         '-m', 'tcp', '-p', 'tcp')

    # get the address of eth0
    ni.ifaddresses('eth0')
    myip = ni.ifaddresses('eth0')[2][0]['addr']
    # add a rule not route packets that are
    # generated locally though sshuttle,
    # unless they're destined for 1.0.0.0
    _ipt('-A', mark_chain, '-j', 'RETURN', '--src', '%s/32' % myip,
         '!', '--dest', '1.0.0.0')
    # add a rule to not route packets that are
    # destined to the local address though sshuttle
    _ipt('-A', mark_chain, '-j', 'RETURN', '--dest', '%s/32' % myip)

    if udp:
        _ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
             '-m', 'udp', '-p', 'udp')
        # Redirect DNS (port 53) for nameservers of this address family.
        for _, ip in [i for i in nslist if i[0] == family]:
            _ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
                 '--dest', '%s/32' % ip, '--src', '%s/32' % myip,
                 '-m', 'udp', '-p', 'udp', '--dport', '53')
            _ipt('-A', tproxy_chain, '-j', 'TPROXY',
                 '--tproxy-mark', '0x1/0x1',
                 '--dest', '%s/32' % ip, '--src', '%s/32' % myip,
                 '-m', 'udp', '-p', 'udp', '--dport', '53',
                 '--on-port', str(dnsport))

    # Widest subnets first so more specific excludes take precedence.
    for _, swidth, sexclude, snet, fport, lport \
            in sorted(subnets, key=subnet_weight, reverse=True):
        tcp_ports = ('-p', 'tcp')
        tcp_ports = _ipt_proto_ports(tcp_ports, fport, lport)
        if sexclude:
            _ipt('-A', mark_chain, '-j', 'RETURN',
                 '--dest', '%s/%s' % (snet, swidth),
                 '-m', 'tcp', *tcp_ports)
            _ipt('-A', tproxy_chain, '-j', 'RETURN',
                 '--dest', '%s/%s' % (snet, swidth),
                 '-m', 'tcp', *tcp_ports)
        else:
            _ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
                 '--dest', '%s/%s' % (snet, swidth),
                 '-m', 'tcp', *tcp_ports)
            _ipt('-A', tproxy_chain, '-j', 'TPROXY',
                 '--tproxy-mark', '0x1/0x1',
                 '--dest', '%s/%s' % (snet, swidth),
                 '-m', 'tcp', *(tcp_ports + ('--on-port', str(port))))
        if udp:
            udp_ports = ('-p', 'udp')
            udp_ports = _ipt_proto_ports(udp_ports, fport, lport)
            if sexclude:
                _ipt('-A', mark_chain, '-j', 'RETURN',
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'udp', *udp_ports)
                _ipt('-A', tproxy_chain, '-j', 'RETURN',
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'udp', *udp_ports)
            else:
                # BUG FIX: was hard-coded ('-p', 'udp'), discarding any
                # --dport range computed in udp_ports; mirror the tcp
                # branch so port-limited subnets mark only those ports.
                _ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'udp', *udp_ports)
                _ipt('-A', tproxy_chain, '-j', 'TPROXY',
                     '--tproxy-mark', '0x1/0x1',
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'udp', *(udp_ports + ('--on-port', str(port))))
if parsed.scheme != 'redis': raise URLError('Redis URL does not have the redis scheme') path = parsed.path[1:] or '' query = parse_qs(parsed.query or '') if path: db = int(path) elif 'db' in query: db = int(query['db'][0]) else: db = 0 options = {'host': parsed.hostname, 'port': parsed.port, 'db': db} if parsed.password: options['password'] = parsed.password return options if __name__ == '__main__': redis_url = 'redis://*****:*****@host:1234/?db=1' print(parse_redis_url(redis_url)) exit(0) from redlock import RedLockFactory from rps_cnc import config lock_factory = RedLockFactory( connection_details=[parse_redis_url(url) for url in config.REDIS_MASTERS])
def make_lock_factory(connection_details):
    """Thin wrapper constructing a RedLockFactory from node details."""
    factory = RedLockFactory(connection_details)
    return factory