def __init__(self, node, niu, job_processor, db):
    """Wire up the cache manager: elliptics session, key storage and timers.

    Args:
        node: elliptics node used for session creation and distribution.
        niu: node info updater collaborator (stored for later use).
        job_processor: passed through to the CacheDistributor.
        db: database handle; the keys collection name is looked up in config.

    Raises:
        KeyError: if config parameter metadata.cache.db is missing.
    """
    self.node = node
    self.niu = niu
    self.session = elliptics.Session(self.node)
    # Session timeout comes from the 'elliptics' config section, default 5.
    wait_timeout = config.get('elliptics', {}).get('wait_timeout', 5)
    self.session.set_timeout(wait_timeout)
    self.service_metakey = str(
        self.session.transform(keys.SYMMETRIC_GROUPS_KEY))
    try:
        keys_db_uri = config['metadata']['cache']['db']
    except KeyError:
        # The cache keys collection is mandatory; log and propagate.
        logger.error('Config parameter metadata.cache.db is required '
                     'for cache manager')
        raise
    self.keys_db = Collection(db[keys_db_uri], 'keys')
    self.distributor = CacheDistributor(
        self.node, self.keys_db, job_processor)
    self.top_keys = {}
    self.__tq = timed_queue.TimedQueue()
    # Prime internal state immediately before scheduling periodic work.
    self.nodes_update()
    self.update_cache_groups()
    # Periodic top-keys monitoring; period is configurable, default 1800 s.
    self.top_update_timer = periodic_timer(
        seconds=CACHE_CFG.get('top_update_period', 1800))
    self.__tq.add_task_at(
        CacheManager.MONITOR_TOP_STATS,
        self.top_update_timer.next(),
        self.monitor_top_stats)
def search_by_history_record(self, start_ts=None, finish_ts=None, type=None):
    """Find group histories whose couples or nodes records match a filter.

    Args:
        start_ts: inclusive lower bound on record timestamp ($gte), optional.
        finish_ts: exclusive upper bound on record timestamp ($lt), optional.
        type: record type to match, optional (name shadows the builtin but is
            kept for backward compatibility with existing keyword callers).

    Returns:
        List of GroupHistory objects, each bound to this collection.
    """
    record_pattern = {}
    if type:
        record_pattern.update(Collection.condition('type', type))
    # Compare against None explicitly: a timestamp of 0 (the epoch) is a
    # legitimate bound and must not be silently dropped by truthiness.
    if start_ts is not None or finish_ts is not None:
        timestamp_pattern = {}
        if start_ts is not None:
            timestamp_pattern['$gte'] = start_ts
        if finish_ts is not None:
            timestamp_pattern['$lt'] = finish_ts
        record_pattern['timestamp'] = timestamp_pattern
    # A record matching in either the couples or the nodes array qualifies.
    or_pattern = {'$or': [
        {
            'couples': {
                '$elemMatch': record_pattern,
            }
        },
        {
            'nodes': {
                '$elemMatch': record_pattern,
            }
        },
    ]}
    ghs = []
    for group_history_record in self.collection.find(or_pattern):
        gh = GroupHistory(**group_history_record)
        gh.collection = self.collection
        ghs.append(gh)
    return ghs
def search_by_history_record(self, start_ts=None, finish_ts=None, type=None):
    """Find group histories whose couples or nodes records match a filter.

    Args:
        start_ts: inclusive lower bound on record timestamp ($gte), optional.
        finish_ts: exclusive upper bound on record timestamp ($lt), optional.
        type: record type to match, optional (name shadows the builtin but is
            kept for backward compatibility with existing keyword callers).

    Returns:
        List of GroupHistory objects, each bound to this collection.
    """
    record_pattern = {}
    if type:
        record_pattern.update(Collection.condition("type", type))
    # Compare against None explicitly: a timestamp of 0 (the epoch) is a
    # legitimate bound and must not be silently dropped by truthiness.
    if start_ts is not None or finish_ts is not None:
        timestamp_pattern = {}
        if start_ts is not None:
            timestamp_pattern["$gte"] = start_ts
        if finish_ts is not None:
            timestamp_pattern["$lt"] = finish_ts
        record_pattern["timestamp"] = timestamp_pattern
    # A record matching in either the couples or the nodes array qualifies.
    or_pattern = {"$or": [{"couples": {"$elemMatch": record_pattern}},
                          {"nodes": {"$elemMatch": record_pattern}}]}
    ghs = []
    for group_history_record in self.collection.find(or_pattern):
        gh = GroupHistory(**group_history_record)
        gh.collection = self.collection
        ghs.append(gh)
    return ghs
def __init__(self, job_finder, node, db, niu, minions):
    """Wire up the job processor: session, collaborators, timers, downtimes.

    Args:
        job_finder: collaborator used to locate jobs (stored as-is).
        node: elliptics node; supplies the session and meta_session.
        db: database handle; the jobs db name is looked up in config.
        niu: node info updater collaborator.
        minions: minions collaborator (remote task execution).
    """
    logger.info('Starting JobProcessor')
    self.job_finder = job_finder
    self.session = elliptics.Session(node)
    # Prefer the 'elliptics' section timeout; fall back to the top-level
    # 'wait_timeout' key, then to 5 seconds.
    wait_timeout = config.get('elliptics', {}).get('wait_timeout', None) or \
        config.get('wait_timeout', 5)
    self.session.set_timeout(wait_timeout)
    self.meta_session = node.meta_session
    self.minions = minions
    self.node_info_updater = niu
    # Planner is attached later by external wiring, not in the constructor.
    self.planner = None
    self.__tq = timed_queue.TimedQueue()
    # Periodic job execution; period is configurable, default 60 seconds.
    self.jobs_timer = periodic_timer(
        seconds=JOB_CONFIG.get('execute_period', 60))
    self.downtimes = Collection(db[config['metadata']['jobs']['db']],
                                'downtimes')
    self.__tq.add_task_at(self.JOBS_EXECUTE,
                          self.jobs_timer.next(),
                          self._execute_jobs)
def __init__(self, db):
    """Bind this index to the 'jobs' collection of the configured jobs db."""
    jobs_db_name = config['metadata']['jobs']['db']
    self.collection = Collection(db[jobs_db_name], 'jobs')
def __init__(self, db):
    """Bind this index to the 'history' collection of the configured db."""
    history_db_name = config['metadata']['history']['db']
    self.collection = Collection(db[history_db_name], 'history')
def __init__(self, db):
    """Bind this index to the couple free effective space statistics."""
    stats_db_name = config['metadata']['statistics']['db']
    self.collection = Collection(db[stats_db_name],
                                 'couple_free_effective_space')