def used_channels():
    """Yield the channel id of every registered request.

    Scans all ``requests:*`` hashes in redis. A request whose hash cannot
    be read is skipped with a warning instead of aborting the scan.
    """
    req_channel_keys = r.keys('requests:*')
    for rck in req_channel_keys:
        try:
            yield r.hget(rck, 'channel')
        except Exception as e:
            # BaseException.message was deprecated in 2.6 and removed in
            # Python 3 — log the exception object itself instead.
            log.warning(e)
def __check_gp(self):
    """Look for a stored fragment whose graph pattern maps onto this
    request's pattern.

    Returns a ``(fragment_id, mapping)`` tuple for the first match, or
    ``None`` when no registered fragment covers the pattern.
    """
    for key in r.keys('fragments:*:gp'):
        candidate = GraphPattern(r.smembers(key))
        gp_mapping = candidate.mapping(self._graph_pattern)
        if gp_mapping:
            # Key layout is fragments:<fid>:gp — the middle part is the id.
            return key.split(':')[1], gp_mapping
    return None
def remove(self):
    """Delete this request and every redis key in its namespace in one
    transaction, then log the removal."""
    with r.pipeline(transaction=True) as p:
        p.multi()
        # Drop the request from the global 'requests' sorted set first.
        p.zrem('requests', r.hget(self._request_key, 'id'))
        # Queue deletion of every key hanging off the request's prefix.
        for key in r.keys('{}*'.format(self._request_key)):
            p.delete(key)
        # Let subclasses queue their own cleanup on the same pipeline.
        self._remove(p)
        p.execute()
    log.info('Request {} was removed'.format(self._request_id))
def get_request(rid):
    """Return a JSON description of request *rid* with its channel and
    broker references expanded into their stored hashes.

    :raises NotFound: when the request id is not registered.
    """
    from ast import literal_eval

    request_key = 'requests:{}'.format(rid)
    # EXISTS is O(1); KEYS on an exact (wildcard-free) name scanned the
    # whole keyspace just to test for one key.
    if not r.exists(request_key):
        raise NotFound('The request {} does not exist'.format(rid))
    r_dict = filter_hash_attrs(request_key, lambda x: not x.startswith('__'))
    # Expand the channel/broker ids into the full stored dictionaries.
    r_dict['channel'] = r.hgetall('channels:{}'.format(r_dict['channel']))
    r_dict['broker'] = r.hgetall('brokers:{}'.format(r_dict['broker']))
    r_dict['pattern'] = "{ %s }" % r_dict['pattern']
    if 'mapping' in r_dict:
        # literal_eval instead of eval: the stored mapping is pure data,
        # and eval would execute arbitrary code read from the store.
        r_dict['mapping'] = literal_eval(r_dict['mapping'])
    return jsonify(r_dict)
def __remove_fragment(fid):
    """Remove fragment *fid* and notify its pending requests.

    Acquires the fragment lock before touching any state. The lock is not
    explicitly released: its key matches the ``fragments:<fid>*`` pattern
    and is deleted together with the rest of the fragment keys
    (NOTE(review): presumably intentional — confirm against Lock usage).
    """
    log.debug("Waiting to remove fragment {}...".format(fid))
    lock_key = "fragments:{}:lock".format(fid)
    lock = r.lock(lock_key, lock_class=Lock)
    lock.acquire()
    with r.pipeline(transaction=True) as p:
        requests, r_sinks = __load_fragment_requests(fid)
        __notify_completion(fid, r_sinks)
        # Plain loop instead of map(): map() is lazy on Python 3, so the
        # deletes would never actually be queued on the pipeline.
        for key in r.keys("fragments:{}*".format(fid)):
            p.delete(key)
        p.srem("fragments", fid)
        p.execute()
    log.info("Fragment {} has been removed".format(fid))
def __collect_fragments():
    """Daemon loop: keep one pull task alive for every fragment that is
    neither synced nor currently being pulled."""
    registered = r.scard("fragments")
    synced = len(r.keys("fragments:*:sync"))
    log.info(
        """Collector daemon started:
                    - Fragments: {}
                    - Synced: {}""".format(registered, synced)
    )

    futures = {}
    while True:
        for fid in r.smembers("fragments"):
            # Skip fragments that are already synced or being pulled.
            if r.get("fragments:{}:sync".format(fid)) is not None:
                continue
            if r.get("fragments:{}:pulling".format(fid)) is not None:
                continue
            # Reap a finished future so the fragment can be resubmitted.
            if fid in futures and futures[fid].done():
                del futures[fid]
            if fid not in futures:
                futures[fid] = thp.submit(__pull_fragment, fid)
        time.sleep(1)
def get_requests():
    """Return the ids of all registered requests as a JSON document."""
    rids = [
        key.split(':')[1]
        for key in r.keys('requests:*')
        # Plain request hashes are requests:<rid>; deeper keys
        # (requests:<rid>:...) are not requests themselves.
        if len(key.split(':')) == 2
    ]
    return jsonify({"requests": rids})
# Throttling interval for the collector, clamped to at least 1
# (configured via PARAMS.collect_throttling, default 30).
COLLECT_THROTTLING = max(1, int(app.config.get("PARAMS", {}).get("collect_throttling", 30)))
log.info(
    """Fragment daemon setup:
                    - On-demand threshold: {}
                    - Minimum sync time: {}
                    - Maximum concurrent collectors: {}
                    - Maximum concurrent fragments: {}""".format(
        ON_DEMAND_TH, MIN_SYNC, N_COLLECTORS, MAX_CONCURRENT_FRAGMENTS
    )
)
# Shared pool used to run fragment pulls, capped at 8 workers.
thp = ThreadPoolExecutor(max_workers=min(8, MAX_CONCURRENT_FRAGMENTS))

# Startup cleanup: discard transient state left behind by a previous run
# so no fragment appears locked, pulling, or consumed at boot.
log.info("Cleaning fragment locks...")
fragment_locks = r.keys("*lock*")
for flk in fragment_locks:
    r.delete(flk)
log.info("Cleaning fragment pulling flags...")
fragment_pullings = r.keys("fragments:*:pulling")
for fpk in fragment_pullings:
    r.delete(fpk)
log.info("Releasing registered fragments...")
fragment_consumers = r.keys("fragments:*:consumers")
for fck in fragment_consumers:
    r.delete(fck)


class FragmentPlugin(object):
def load(self, rid):
    """Bind this instance to the stored request *rid* and load its state.

    :raises ValueError: when no request with that id is registered.
    """
    request_key = 'requests:{}'.format(rid)
    # EXISTS is O(1); the original KEYS call scanned the whole keyspace
    # just to test for a single exact key (the pattern has no wildcard,
    # so the semantics are identical).
    if not r.exists(request_key):
        raise ValueError('Cannot load request: Unknown request id {}'.format(rid))
    self._request_id = rid
    self._request_key = request_key
    self._load()