async def get_model(sampler: str):
    """Return the deserialized model stored in Redis for *sampler*.

    Parameters
    ----------
    sampler : str
        Identifier of the sampler whose model should be fetched. Must be
        one of the names stored under the ``samplers`` JSON key.

    Returns
    -------
    model : object
        The cloudpickle-deserialized object stored under ``model-{sampler}``.

    Raises
    ------
    ServerException
        If *sampler* is not a registered sampler, or no model has been
        stored for it yet.
    """
    samplers = rj.jsonget("samplers")
    if sampler not in samplers:
        raise ServerException(
            f"Can't find model for sampler='{sampler}'. "
            f"Valid choices for sampler are {samplers}"
        )
    # O(1) EXISTS check instead of the original `... not in rj.keys()`,
    # which fetched and scanned the whole keyspace (KEYS blocks Redis).
    if not rj.exists(f"model-{sampler}"):
        logger.warning("rj.keys() = %s", rj.keys())
        flush_logger(logger)
        raise ServerException(f"Model has not been created for sampler='{sampler}'")
    # Separate client with decode_responses=False: the stored value is raw
    # pickled bytes and must not be decoded as text.
    rj2 = Client(host="redis", port=6379, decode_responses=False)
    ir = rj2.get(f"model-{sampler}")
    # NOTE(review): cloudpickle.loads executes arbitrary code from the
    # stored bytes — safe only while Redis is internal/trusted.
    model = cloudpickle.loads(ir)
    return model
def testUsageExampleShouldSucceed(self):
    "Test the usage example"
    # Connect a fresh rejson-py client to the test server
    client = Client(host='localhost', port=port, decode_responses=True)

    # Store a sample document under the key `obj`
    doc = {
        'answer': 42,
        'arr': [None, True, 3.14],
        'truth': {
            'coord': 'out there'
        }
    }
    client.jsonset('obj', Path.rootPath(), doc)

    # Read back a nested field and compare against the original
    fetched = client.jsonget('obj', Path('.truth.coord'))
    self.assertEqual(doc['truth']['coord'], fetched)

    # Delete the first array element, append a value, then pop it again
    item = "something"
    client.jsondel('obj', Path('.arr[0]'))
    client.jsonarrappend('obj', Path('.arr'), item)
    popped = client.jsonarrpop('obj', Path('.arr'))
    self.assertEqual(item, popped)

    # Overwrite a scalar field and verify the update round-trips
    item = 2.17
    client.jsonset('obj', Path('.answer'), item)
    answer = client.jsonget('obj', Path('.answer'))
    self.assertEqual(item, answer)

    # The client also supports plain redis commands, including pipelines
    pipe = client.pipeline()
    pipe.set('foo', 'bar')
    pipe.jsonset('baz', Path.rootPath(), 'qaz')
    pipe.execute()
    plain = client.get('foo')
    self.assertEqual('bar', plain)
    stored = client.jsonget('baz')
    self.assertEqual('qaz', stored)
async def init(ident: str, background_tasks: BackgroundTasks) -> bool:
    """
    Start running an algorithm.

    Parameters
    ----------
    ident : str
        The identifier paired with this algorithm.
    background_tasks : BackgroundTasks
        FastAPI task queue; the sampler's ``run`` method is scheduled on it.

    Returns
    -------
    success : bool
        Always ``True``; failures raise instead.

    Raises
    ------
    ExpParsingError
        If the sampler cannot be restored or constructed from the config.

    Notes
    -----
    This function has side effects: it launches background job with
    algorithm class. This class runs the ``run`` function, which posts
    queries to Redis and process answers posted to Redis.

    If the algorithm class has a ``get_query`` method, the class will
    respond to the API request ``/get_query``. The method ``run`` should
    be modified to handle this.

    params : Dict[str, Any]
        Pulled from the experiment config and Redis. Here's an example
        YAML configuration:

    .. code:: yaml

       targets:
         - 1
         - 2
         - 3
         - 4
       samplers:
         - Random
         - random2
         - class: Random
         - foo: bar

    """
    # TODO: Better handling of exceptions if params keys don't match
    logger.info("backend: initializing %s", ident)
    config = rj.jsonget("exp_config")

    try:
        if f"state-{ident}" in rj.keys():
            # Restore a previously pickled sampler instead of rebuilding it.
            logger.warning("Initializing alg from key 'state-%s'", ident)
            # Raw-bytes client: pickled state must not be text-decoded.
            # See https://github.com/andymccurdy/redis-py/issues/1006
            rj2 = Client(host="redis", port=6379, decode_responses=False)
            state = rj2.get(f"state-{ident}")
            alg = cloudpickle.loads(state)
        else:
            logger.warning("Initializing alg from config")
            params = config["samplers"][ident]
            # The sampler class defaults to the identifier itself when no
            # explicit "class" key is given in the config entry.
            _class = params.pop("class", ident)
            Sampler = getattr(samplers, _class)
            params = {k: _fmt_params(k, v) for k, v in params.items()}
            logger.warning("Sampler for %s = %s", ident, Sampler)

            # Start from the shared sampling defaults, then let this
            # sampler's own params override them.
            common = config["sampling"]["common"]
            p = deepcopy(common)
            p.update(params)

            kwargs = dict(ident=ident, n=config["n"], **p)
            logger.warning("class=%s kwargs= %s", _class, kwargs)
            # Construct from the exact kwargs that were just logged —
            # previously the argument list was re-spelled here, which
            # could silently drift from what the log claimed.
            alg = Sampler(**kwargs)
    except Exception as e:
        msg = exception_to_string(e)
        logger.error(f"Error on alg={ident} init: {msg}")
        flush_logger(logger)
        raise ExpParsingError(status_code=500, detail=msg)

    SAMPLERS[ident] = alg

    # NOTE(review): scheduler address is hard-coded to 127.0.0.2:8786 —
    # confirm this is intentional (looks like a loopback alias).
    dask_client = DaskClient("127.0.0.2:8786")
    logger.info("Before adding init task")
    background_tasks.add_task(alg.run, dask_client)
    logger.info("Returning")
    return True