def test_slave(self):
    master = redislite.Redis(serverconfig={'port': '7000'})
    slave = redislite.Redis(serverconfig={'slaveof': 'localhost 7000'})
    master.set('key', 'value')
    time.sleep(.5)
    value = slave.get('key').decode(encoding='UTF-8')
    self.assertEqual(value, 'value')

def test_redislite_Redis_multiple(self):
    r1 = redislite.Redis()
    r2 = redislite.Redis()
    r1.set('key', 'value')
    r2.set('key2', 'value2')
    # Each server should hold exactly one key
    self.assertEqual(len(r1.keys()), 1)
    self.assertEqual(len(r2.keys()), 1)
    result = r1.get('key').decode(encoding='UTF-8')
    self.assertEqual(result, 'value')
    result = r2.get('key2').decode(encoding='UTF-8')
    self.assertEqual(result, 'value2')

def test_redislite_Redis_multiple_connections(self):
    # Generate a new redis server
    r = redislite.Redis()
    # Pass the first server's db to get a second connection
    s = redislite.Redis(r.db)
    r.set('key', 'value')
    result = s.get('key').decode(encoding='UTF-8')
    self.assertEqual(result, 'value')
    # Both objects should be using the same redis process.
    self.assertEqual(r.pid, s.pid)

def test_redislite_Redis(self):
    r = redislite.Redis()
    self._log_redis_pid(r)
    r.set('key', 'value')
    result = r.get('key').decode(encoding='UTF-8')
    self.assertEqual(result, 'value')

def setUpClass(cls):
    cls.maxDiff = None
    config.rdb = redislite.Redis('redis.db')
    cls.marathon = {
        "protocol": "http",
        "host": "some-marathon.com",
        "apps": "/v2/apps",
        "username": "******",
        "password": "******",
        "blacklist": [".*marathon-healthcheck"],
        "root_app_lable": "ROOT_APP",
        "status_path_lable": "STATUS_PATH",
        "base_domain": "some-domain.com"
    }
    cls.app = {
        "id": "/group/vertical/name",
        "env": {"STATUS_PATH": "/service/internal/status"},
        "instances": 1,
        "cpus": 1,
        "mem": 1024,
        "version": "2016-05-26T07:15:05.585Z",
        "versionInfo": {
            "lastScalingAt": "2016-05-26T07:15:05.585Z",
            "lastConfigChangeAt": "2016-05-26T07:15:05.585Z"
        },
        "tasksStaged": 0,
        "tasksRunning": 1,
        "tasksHealthy": 1,
        "tasksUnhealthy": 0,
        "deployments": [],
        "labels": {}
    }
    cls.app_server_service = {
        "id": "/group/vertical/name",
        "url": "http://some-domain.com/service/internal/status"
    }
    cls.marathon_apps = {
        "apps": [
            cls.app,
            {"id": "/develop/mesos/marathon-healthcheck", "instances": 2, "cpus": 0.01, "mem": 4}
        ]
    }
    cls.marathon_apps_json = json.dumps(cls.marathon_apps)

def test_redislite_Redis_create_redis_directory_tree(self):
    r = redislite.Redis()
    r._create_redis_directory_tree()
    self.assertTrue(r.redis_dir)
    self.assertTrue(os.path.exists(r.redis_dir))
    self.assertTrue(os.path.exists(r.pidfile))
    self.assertTrue(os.path.exists(r.socket_file))
    r._cleanup()

def setUpClass(cls):
    config.rdb = redislite.Redis('redis.db')
    cls.test_apps = [
        testdata_helper.get_task(name='dog', vertical='mammal'),
        testdata_helper.get_task(name='cat', vertical='mammal'),
        testdata_helper.get_task(name='salmon', vertical='fish')
    ]
    cls.maxDiff = None

def test_refcount_cleanup(self):
    self.logger.debug('Setting up 2 connections to a single redis server.')
    r = redislite.Redis()
    self._log_redis_pid(r)
    s = redislite.Redis(r.db)
    self._log_redis_pid(s)
    pid = r.pid
    redis_dir = r.redis_dir

    self.logger.debug('Making sure the redis-server is running')
    p = psutil.Process(pid)
    self.assertTrue(p.is_running())

    self.logger.debug(
        'Shutting down the first instance, redis-server should remain '
        'running due to the other connection.'
    )
    r._cleanup()
    self.assertTrue(
        os.path.exists(redis_dir),
        msg='Shutting down the server removed the temporary directory'
    )
    self.assertEqual(
        s.pid, pid,
        msg='Redis server shutdown with active connection'
    )
    p = psutil.Process(pid)
    self.assertTrue(
        p.is_running(),
        msg='Redis server shutdown with active connection'
    )

    self.logger.debug(
        'Shutting down the second instance, the redis-server should '
        'be gone after the connection terminates'
    )
    self.logger.debug(
        'Second connection count is: %s', s._connection_count()
    )
    s._cleanup()
    with self.assertRaises(psutil.NoSuchProcess):
        p = psutil.Process(pid)

def test_connection_fallthrough(self):
    """
    Create a connection with an argument that will cause redislite
    to pass it to the redis module.  This should generate an
    exception since there is no redis server running on the port.
    :return:
    """
    with self.assertRaises(ConnectionError):
        redislite.Redis(port=1).keys()

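# Hedged sketch, not part of the original suite: the fallthrough behaviour the
# test above exercises is that supplying connection keywords such as ``host``
# or ``port`` makes redislite.Redis behave like a plain redis client talking to
# an external server instead of starting an embedded one.  The port below is an
# assumption; it only works if a real redis-server is already listening there.
def example_connection_fallthrough():
    import redislite
    client = redislite.Redis(host='127.0.0.1', port=6379)  # assumes an external server on 6379
    client.ping()
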
def test_redislite_db_file_cwd_kw(self):
    test_db = 'test_unit_redis.db'
    if os.path.exists(test_db):
        os.remove(test_db)
    r = redislite.Redis(dbfilename=test_db)
    r.set('key', 'value')
    r.save()
    self.assertTrue(os.path.exists(test_db))
    os.remove(test_db)

def test_redis_session_2(self):
    app = flask.Flask(__name__)
    redis_conn = redislite.Redis('/tmp/session_redis.db')
    app.secret_key = 'secret key'
    app.session_cookie_name = 'session_cookie'
    app.config['SESSION_TYPE'] = 'redis'
    app.config['SESSION_REDIS'] = redis_conn
    app.config['SESSION_USE_SIGNER'] = True
    app.config['SESSION_PERMANENT'] = False
    app.config['SESSION_KEY_PREFIX'] = 'redis_app_2'
    RestySharedSession(app)

    @app.route('/set-groups', methods=['POST'])
    def set_groups():
        body = json.loads(flask.request.data)
        flask.session['groups'] = body['groups']
        return flask.jsonify(status='ok')

    @app.route('/get-groups')
    def get_groups():
        return flask.jsonify(groups=list(flask.session.get('groups', [])))

    @app.route('/delete-groups', methods=['POST'])
    def delete_groups():
        if 'groups' in flask.session:
            del flask.session['groups']
        return flask.jsonify(status='deleted')

    @app.errorhandler(500)
    def on_err(err):
        print("ERR: %r" % err)
        traceback.print_exception(*sys.exc_info())
        return "ERROR: %r" % err

    c = app.test_client()
    response = c.post(
        '/set-groups',
        data=json.dumps({'groups': ['one', 'two']}),
        headers={'Content-Type': 'application/json'}
    )
    self.assertEqual({'status': 'ok'}, json.loads(response.data))

    cookie = get_response_cookie(response, 'session_cookie')
    session_id = cookie[:cookie.index('.')]
    session_sig = cookie[cookie.index('.') + 1:]

    stored_sig = redis_conn.get('redis_app_2:signature:' + session_id)
    self.assertEqual(session_sig, stored_sig)

    stored_groups = redis_conn.smembers('redis_app_2:groups:' + session_id)
    self.assertEqual(set(['one', 'two']), stored_groups)

    stored_data = redis_conn.get('redis_app_2:data:' + session_id)
    self.assertTrue(stored_data is not None)

    response = c.get('/get-groups')
    self.assertEqual({'groups': ['one', 'two']}, json.loads(response.data))

    response = c.post('/delete-groups')
    self.assertEqual({'status': 'deleted'}, json.loads(response.data))

def work(identification, graph_name, step_to_do, redis_fn):
    global redis_connection, strict_redis_connection, sparql_server, step, step_graph
    step = step_to_do
    log('work ' + '[' + str(step) + ']')
    # for Collections
    step_graph = ConjunctiveGraph(sparqlstore.SPARQLStore(sparql_uri), graph_name)
    sparql_server = sparql.SPARQLServer(sparql_uri)
    redis_connection = redislite.Redis(redis_fn)
    strict_redis_connection = redislite.StrictRedis(redis_fn)
    gv_output_file_name = identification + '_' + str(step).zfill(7) + '.gv'
    if list(subjects(RDF.type, kbdbg.frame)) == []:
        log('no frames.' + '[' + str(step) + ']')
        put_last_bindings(step, [])
        return
    if (step == global_start - 1):
        gv_output_file_name = 'dummy'
    try:
        os.unlink(gv_output_file_name)
    except FileNotFoundError:
        pass
    gv_output_file = open(gv_output_file_name, 'w')
    e = Emitter(gv_output_file, step)
    e.generate_gv_image()
    gv_output_file.close()
    if (step == global_start - 1):
        return
    log('convert..' + '[' + str(step) + ']')
    #cmd, args = subprocess.check_output, ("convert", '-regard-warnings', "-extent", '6000x3000', gv_output_file_name, '-gravity', 'NorthWest', '-background', 'white', gv_output_file_name + '.svg')
    cmd, args = subprocess.check_output, ("dot", '-Tsvg', gv_output_file_name, '-O')
    try:
        r = cmd(args, stderr=subprocess.STDOUT)
        if r != b"":
            raise RuntimeError('[' + str(step) + '] ' + str(r))
    except subprocess.CalledProcessError as e:
        log('[' + str(step) + ']' + e.output)
    log('convert done.' + '[' + str(step) + ']')
    if len(stats):
        print('stats:')
        for i in stats:
            print(i)
        #stats.clear()
    redis_connection._cleanup()
    strict_redis_connection._cleanup()

def test_cache_is_hit_on_second_req(apigw_event, mocker):
    redis = redislite.Redis()
    mocker.patch("entity_extractor.app.get_redis", return_value=redis)
    result = app.lambda_handler(apigw_event, "")
    result = app.lambda_handler(apigw_event, "")
    data = json.loads(result["body"])
    assert result["statusCode"] == 200
    assert data["cache_hit"] == True

def test_redislite_db_file_cwd_args(self):
    test_db = 'test_unit_redis.db'
    if os.path.exists(test_db):
        os.remove(test_db)
    r = redislite.Redis(test_db)
    self._log_redis_pid(r)
    r.set('key', 'value')
    r.save()
    self.assertTrue(os.path.exists(test_db))
    os.remove(test_db)

def test_redislite_Redis_with_db_file_keyword(self):
    temp_dir = tempfile.mkdtemp()
    filename = os.path.join(temp_dir, 'redis.db')
    self.assertFalse(os.path.exists(filename))
    r = redislite.Redis(dbfilename=filename)
    r.set('key', 'value')
    result = r.get('key').decode(encoding='UTF-8')
    self.assertEqual(result, 'value')
    r.save()
    r._cleanup()
    self.assertTrue(os.path.exists(filename))
    shutil.rmtree(temp_dir)

def test_cache_is_set_on_first_req(apigw_event, mocker):
    redis = redislite.Redis()
    mocker.patch("entity_extractor.app.get_redis", return_value=redis)
    result = app.lambda_handler(apigw_event, "")
    hashed_key = hashlib.md5(text.encode('utf-8')).hexdigest()
    data = json.loads(result["body"])
    # Assert that the Redis cache is set, the request is successful and the cache isn't hit
    assert redis.get(hashed_key) == json.dumps(
        (["Alex", "PERSON"], ["test", "TEST_LABEL"])).encode()
    assert result["statusCode"] == 200
    assert data["cache_hit"] == False

def _ensure_redis(redis: Optional[Redis]):
    if redis:
        return redis
    try:
        import redislite
        return redislite.Redis()
    except ImportError:
        raise ValueError(
            "Redis instance not given and redislite not importable. Run\n"
            "pip install redislite")

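# Hedged usage sketch (not from the original module): an explicit client passed
# to _ensure_redis is returned unchanged, while ``None`` falls back to a
# throwaway embedded redislite server; ValueError is raised only when redislite
# itself is not installed.
def example_ensure_redis_fallback():
    embedded = _ensure_redis(None)   # starts an embedded redislite instance
    embedded.set('key', b'value')
    assert embedded.get('key') == b'value'
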
def test_redislite_redis_custom_socket_file(self):
    """
    Test creating a redis instance with a specified socket filename
    :return:
    """
    socket_file_name = '/tmp/test.socket'
    r = redislite.Redis(unix_socket_path=socket_file_name)
    self._log_redis_pid(r)
    self.assertEqual(r.socket_file, socket_file_name)
    print(os.listdir('.'))
    mode = os.stat(socket_file_name).st_mode
    isSocket = stat.S_ISSOCK(mode)
    self.assertTrue(isSocket)
    r._cleanup()

def test_auth(self):
    redis_server = redislite.Redis(
        serverconfig={
            'requirepass': '******',
            'port': self.redis_test_port + 1
        },
        password='******'
    )
    # This shouldn't generate an exception
    try:
        redis_client = redis.Redis(
            host='127.0.0.1', port=self.redis_test_port + 1, password='******')
        uredis_client = uredis.Redis(
            host='127.0.0.1', port=self.redis_test_port + 1, password='******')
    finally:
        redis_server.shutdown()

def redislite(tmp_path_factory):
    import redislite
    rds: redislite.Redis
    tmp = tmp_path_factory.mktemp("redislite", numbered=True)
    print(tmp)
    tmpfile = tmp / "pymq_test.db"
    rds = redislite.Redis(str(tmpfile), decode_responses=True)
    rds.get("dummykey")  # run a first command to initiate
    yield rds
    rds.shutdown()
    os.remove(rds.redis_configuration_filename)
    os.remove(rds.settingregistryfile)
    shutil.rmtree(rds.redis_dir)

def test_persistent_cache_redis(monkeypatch, with_flag):
    import redislite
    server = redislite.Redis()
    monkeypatch.delenv("MGE_FASTRUN_CACHE_TYPE", raising=False)
    monkeypatch.setenv(
        "MGE_FASTRUN_CACHE_URL", "redis+socket://{}".format(server.socket_file))
    if with_flag:
        server.set("mgb-cache-flag", 1)
    pc = PersistentCacheOnServer()
    pc.put("test", "hello", "world")
    if with_flag:
        pc = PersistentCacheOnServer()
        assert pc.get("test", "hello") == b"world"
        assert pc.config.type == "redis"
    else:
        assert pc.config.type == "in-file"

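# Hedged sketch (an assumption, not part of the test above): the embedded
# redislite server listens on a unix domain socket exposed as ``socket_file``,
# so an ordinary redis client can attach to the same data via
# ``unix_socket_path`` -- which is what the redis+socket:// cache URL points at.
def example_connect_over_socket():
    import redis
    import redislite
    server = redislite.Redis()
    client = redis.Redis(unix_socket_path=server.socket_file)
    client.set('shared', '1')
    assert server.get('shared') == b'1'
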
def run(start, end, workers):
    global global_start, graph_name_start
    global_start = start
    redis_fn = redislite.Redis().db
    if workers:
        worker_pool = ProcessPoolExecutor(max_workers=workers)
    runs_graph = Graph(sparqlstore.SPARQLStore(sparql_uri), default_graph)
    graph_name_start = runs_graph.value(kbdbg.latest, kbdbg['is'], any=False).toPython()
    identification = fix_up_identification(graph_name_start)
    step_to_submit = -1
    for step_graph_uri in profile(list, (Collection(runs_graph, URIRef(graph_name_start)), )):
        step_to_submit += 1
        if step_to_submit < start - 1:
            log("skipping [" + str(step_to_submit) + ']')
            continue
        if step_to_submit > end and end != -1:
            log("ending")
            break
        args = (identification, step_graph_uri, step_to_submit, redis_fn)
        if not workers:
            work(*args)
        else:
            log('submit ' + '[' + str(step_to_submit) + ']' +
                ' (queue size: ' + str(len(futures)) + ')')
            if len(futures) > workers:
                time.sleep(len(futures) - workers)
            fut = worker_pool.submit(work, *args)
            fut.step = step_to_submit
            futures.append(fut)
            log('submitted ')
            check_futures()
        log('loop ')
    if workers:
        worker_pool.shutdown()
    check_futures()

def test_redis_session_1(self):
    app = flask.Flask(__name__)
    redis_conn = redislite.Redis('/tmp/session_redis.db')
    app.secret_key = 'secret key'
    app.config['SESSION_TYPE'] = 'redis'
    app.config['SESSION_REDIS'] = redis_conn
    app.config['SESSION_USE_SIGNER'] = True
    app.config['SESSION_PERMANENT'] = False
    RestySharedSession(app)

    @app.route('/set', methods=['POST'])
    def set():
        flask.session['value'] = flask.request.form['value']
        return 'value set'

    @app.route('/get')
    def get():
        return flask.session['value']

    @app.route('/delete', methods=['POST'])
    def delete():
        del flask.session['value']
        return 'value deleted'

    @app.errorhandler(500)
    def on_err(err):
        print("ERR: %r" % err)
        traceback.print_exception(*sys.exc_info())
        return "ERROR: %r" % err

    c = app.test_client()
    self.assertEqual(
        c.post('/set', data={'value': '42'}).data,
        b'value set')
    self.assertEqual(c.get('/get').data, b'42')
    c.post('/delete')

def create_app(port, environment, working_dir, greedy_mode):
    flask = Flask(__name__)
    flask.config.from_pyfile('config.py')
    flask.jinja_env.filters['ceil'] = view_util.ceil

    config_loader = ConfigLoader(verify=False)
    config.info = config_loader.load_application_info("./")
    config.config = config_loader.load_config("resources/", environment, fill_with_defaults=True)
    config.rdb = redislite.Redis(working_dir + 'redis.db')
    config.rdb.flushall()
    config.rdb.flushdb()

    start_tasks(config.config, greedy_mode)
    register_status_page(flask, config.rdb, config.info, environment, port)

    flask.register_blueprint(views.blueprint)
    flask.register_blueprint(styleguide.blueprint)
    return flask

def work(serialized_graph, input_file_name, step, no_parallel, redis_fn):
    global redis_connection, strict_redis_connection
    strict_redis_connection = redis_fn
    log('work ' + '[' + str(step) + ']')
    redis_connection = redislite.Redis(redis_fn)
    strict_redis_connection = redislite.StrictRedis(redis_fn)
    gv_output_file_name = input_file_name + '_' + str(step).zfill(5) + '.gv'
    log('loads ' + '[' + str(step) + ']')
    g = pickle.loads(serialized_graph)
    #g = Graph(OrderedAndIndexedStore())
    #for i in ujson.loads(serialized_graph):
    #    g.add(i)
    #log('work' + str(id(g)) + ' ' + str(id(g.store)) + ' ' + str(id(g.store.indexes)) + ' ' + str(id(g.store.indexes['ttft'])) + ' ' + str(id(g.store.indexes['ttft'][rdflib.URIRef('http://kbd.bg/Rule1')])))
    g.store.locked = True
    if list(g.subjects(RDF.type, kbdbg.frame)) == []:
        log('no frames.' + '[' + str(step) + ']')
        put_last_bindings(step, [])
        return
    if (step == global_start - 1):
        gv_output_file_name = 'dummy'
    try:
        os.unlink(gv_output_file_name)
    except FileNotFoundError:
        pass
    gv_output_file = open(gv_output_file_name, 'w')
    e = Emitter(g, gv_output_file, step)
    e.generate_gv_image()
    gv_output_file.close()
    if (step == global_start - 1):
        return
    log('convert..' + '[' + str(step) + ']')
    #cmd, args = subprocess.check_output, ("convert", '-regard-warnings', "-extent", '6000x3000', gv_output_file_name, '-gravity', 'NorthWest', '-background', 'white', gv_output_file_name + '.svg')
    cmd, args = subprocess.check_output, ("dot", '-Tsvg', gv_output_file_name, '-O')
    if True:
        try:
            r = cmd(args, stderr=subprocess.STDOUT)
            if r != b"":
                raise RuntimeError('[' + str(step) + '] ' + str(r))
        except subprocess.CalledProcessError as e:
            log('[' + str(step) + ']' + e.output)
        log('convert done.' + '[' + str(step) + ']')
    else:
        def do_or_die(args):
            r = cmd(args, stderr=subprocess.STDOUT)
            if r != b"":
                log(r)
                raise RuntimeError(r)
                #exit()
        futures.append(graphviz_pool.submit(do_or_die, args))
    redis_connection._cleanup()
    strict_redis_connection._cleanup()

def run(start, end, no_parallel, graphviz_workers, workers, input_file_name):
    global global_start
    global_start = start
    input_file = open(input_file_name)
    lines = []
    #os.system("rm -f kbdbg"+fn+'\\.*')
    if no_parallel:
        graphviz_workers = 0
    if graphviz_workers == -1:
        graphviz_workers = available_cpus()
    if graphviz_workers == None:
        graphviz_workers = 4
    if graphviz_workers == 0:
        no_parallel = True
    #graphviz_pool = ProcessPoolExecutor(max_workers = graphviz_workers)
    #worker_pool = ThreadPoolExecutor(max_workers = workers)
    worker_pool = ProcessPoolExecutor(max_workers=workers)
    g = Graph(OrderedAndIndexedStore())
    redis_fn = redislite.Redis().db
    prefixes = []
    while True:
        l = input_file.readline()
        if l == "":
            break
        if l.startswith("#step"):
            step = int(l[5:l.find(' ')])
        elif l.startswith('@prefix'):
            prefixes.append(l)
            continue
        else:
            lines.append(l)
            continue
        if step < start - 1:
            log("skipping [" + str(step) + ']')
            continue
        if step > end and end != -1:
            log("ending")
            break
        log('parse ' + '[' + str(step) + ']')
        g.parse(data="".join(prefixes + lines), format='n3')
        lines = []
        log('pickle ' + '[' + str(step) + ']')
        pickled_graph = pickle.dumps(g)
        #pickled_graph = ujson.dumps(g)
        args = (pickled_graph, input_file_name, step, no_parallel, redis_fn)
        if no_parallel:
            work(*args)
        else:
            log('submit ' + '[' + str(step) + ']' + ' (queue size: ' + str(len(futures)) + ')')
            if len(futures) > workers:
                time.sleep(len(futures) - workers)
            fut = worker_pool.submit(work, *args)
            fut.step = step
            futures.append(fut)
            log('submitted ')
            check_futures()
        log('loop ')
    worker_pool.shutdown()
    #graphviz_pool.shutdown()
    check_futures()

def redis_server():
    instance = redislite.Redis(serverconfig={"port": "6380"})
    yield

def setUp(self):
    self.tmpfile = tempfile.mktemp('.db', 'galileo_test_')
    self.rds = redislite.Redis(self.tmpfile, decode_responses=True)
    self.rds.get('dummykey')  # run a first command to initiate

def setUp(self):
    self.redis_server = redislite.Redis(
        serverconfig={'port': self.redis_test_port})
    self.uredis_client = uredis.Redis(
        host='127.0.0.1', port=self.redis_test_port)

def get_cache(logger: gluetool.log.ContextAdapter) -> redislite.Redis:
    return redislite.Redis(dbfilename=str(redis_db_file))