def loadNodes(self, delayed=True):
    """Reload unit statistics and keep the persisted max group id current.

    Executes pending tasks, then compares the largest group id currently
    known to storage against the value persisted under
    keys.MASTERMIND_MAX_GROUP_KEY, writing the larger one back.  Always
    reschedules itself and refreshes the dynamic "too old age" setting.

    :param delayed: forwarded to execute_tasks (task scheduling mode).
    """
    self.__logging.info('Start loading units')
    try:
        self.execute_tasks(delayed)

        try:
            max_group = int(EllAsyncResult(
                self.__node.meta_session.read_data(
                    keys.MASTERMIND_MAX_GROUP_KEY),
                EllReadResult).get()[0].data)
        except Exception as e:
            # The key may simply not exist yet (fresh install); log instead
            # of silently swallowing arbitrary failures with a bare except.
            self.__logging.error(
                'Failed to read max group number: %s' % (str(e),))
            max_group = 0

        if not storage.groups:
            # max() over an empty sequence raises ValueError; skip the
            # comparison entirely when storage has no groups yet.
            self.__logging.warning('No groups found in storage')
        else:
            curr_max_group = max(g.group_id for g in storage.groups)
            if curr_max_group > max_group:
                EllAsyncResult(self.__node.meta_session.write_data(
                    keys.MASTERMIND_MAX_GROUP_KEY, str(curr_max_group)),
                    EllLookupResult).get()
    except Exception as e:
        self.__logging.error("Error while loading node stats: %s\n%s" %
                             (str(e), traceback.format_exc()))
    finally:
        # Reschedule unconditionally so statistics keep refreshing even
        # after a failed pass.
        reload_period = config.get('nodes_reload_period', 60)
        self.__tq.add_task_in("load_nodes", reload_period, self.loadNodes)
        self.__nodeUpdateTimestamps = (
            self.__nodeUpdateTimestamps[1:] + (time.time(),))
        bla.setConfigValue(
            "dynamic_too_old_age",
            max(time.time() - self.__nodeUpdateTimestamps[0],
                reload_period * 3))
def node_statistics_update(self):
    """Collect node monitor statistics under the cluster update lock.

    Refreshes monitor stats, then compares the largest group id in storage
    against the persisted value under keys.MASTERMIND_MAX_GROUP_KEY,
    writing the larger one back.  Always reschedules itself and refreshes
    the dynamic "too old age" setting.
    """
    # Assign before the try block: the finally clause below logs elapsed
    # time, and would raise NameError if lock acquisition failed first.
    start_ts = time.time()
    try:
        with self.__cluster_update_lock:
            logger.info('Cluster updating: node statistics collecting started')
            self.monitor_stats()

            try:
                max_group = int(self.__node.meta_session.read_data(
                    keys.MASTERMIND_MAX_GROUP_KEY).get()[0].data)
            except Exception as e:
                logger.error('Failed to read max group number: {0}'.format(e))
                max_group = 0

            if not len(storage.groups):
                # Nothing to compare against; finally still reschedules.
                logger.warn('No groups found in storage')
                return

            curr_max_group = max((g.group_id for g in storage.groups))
            logger.info('Current max group in storage: {0}'.format(curr_max_group))
            if curr_max_group > max_group:
                logger.info('Updating storage max group to {0}'.format(curr_max_group))
                self.__node.meta_session.write_data(
                    keys.MASTERMIND_MAX_GROUP_KEY, str(curr_max_group)).get()
    except Exception as e:
        # error level, not info: this is a failed collection pass.
        logger.error('Failed to fetch node statistics: {0}\n{1}'.format(
            e, traceback.format_exc()))
    finally:
        logger.info('Cluster updating: node statistics collecting finished, '
                    'time: {0:.3f}'.format(time.time() - start_ts))
        reload_period = config.get('nodes_reload_period', 60)
        self.__tq.add_task_in('node_statistics_update', reload_period,
                              self.node_statistics_update)
        self.__nodeUpdateTimestamps = (
            self.__nodeUpdateTimestamps[1:] + (time.time(),))
        bla.setConfigValue(
            "dynamic_too_old_age",
            max(time.time() - self.__nodeUpdateTimestamps[0],
                reload_period * 3))
def loadNodes(self, delayed=True):
    """Reload unit statistics and keep the persisted max group id current.

    Executes pending tasks, reads the persisted max group id from the meta
    session, and writes back the storage-wide maximum when it is larger.
    Always reschedules itself and refreshes the dynamic "too old age"
    setting.

    :param delayed: forwarded to execute_tasks (task scheduling mode).
    """
    self.__logging.info('Start loading units')
    try:
        self.execute_tasks(delayed)

        try:
            max_group = int(
                EllAsyncResult(
                    self.__node.meta_session.read_data(
                        keys.MASTERMIND_MAX_GROUP_KEY),
                    EllReadResult).get()[0].data)
        except Exception as e:
            # Missing key on a fresh install is expected; log the reason
            # rather than using a bare except that hides real errors.
            self.__logging.error(
                'Failed to read max group number: %s' % (str(e),))
            max_group = 0

        if not storage.groups:
            # Guard against ValueError from max() on an empty sequence.
            self.__logging.warning('No groups found in storage')
        else:
            curr_max_group = max(g.group_id for g in storage.groups)
            if curr_max_group > max_group:
                EllAsyncResult(
                    self.__node.meta_session.write_data(
                        keys.MASTERMIND_MAX_GROUP_KEY,
                        str(curr_max_group)),
                    EllLookupResult).get()
    except Exception as e:
        self.__logging.error("Error while loading node stats: %s\n%s" %
                             (str(e), traceback.format_exc()))
    finally:
        # Reschedule regardless of outcome so stats keep refreshing.
        reload_period = config.get('nodes_reload_period', 60)
        self.__tq.add_task_in("load_nodes", reload_period, self.loadNodes)
        self.__nodeUpdateTimestamps = self.__nodeUpdateTimestamps[1:] + (
            time.time(), )
        bla.setConfigValue(
            "dynamic_too_old_age",
            max(time.time() - self.__nodeUpdateTimestamps[0],
                reload_period * 3))
def loadNodes(self):
    """Load raw node stats, schedule group updates, persist max group id.

    Feeds every raw node from the session stat log into bla, schedules a
    symmetric-group update task per known group, then writes the largest
    known group id to the meta session when it exceeds the persisted one.
    Always reschedules itself and refreshes the dynamic "too old age"
    setting.
    """
    self.__logging.info("Start loading units")
    try:
        raw_stats = self.__session.stat_log()
        for raw_node in raw_stats:
            bla.add_raw_node(raw_node)

        for group_id in bla.all_group_ids():
            self.__tq.add_task_in(
                get_symm_group_update_task_id(group_id),
                get_config_value("symm_group_read_gap", 1),
                self.updateSymmGroup, group_id)

        try:
            max_group = int(
                self.__node.meta_session.read(mastermind_max_group_key))
        except Exception as e:
            # Key may not exist yet; log instead of a bare except that
            # also hides unrelated failures.
            self.__logging.error(
                "Failed to read max group number: %s" % (str(e),))
            max_group = 0

        try:
            curr_max_group = max(bla.all_group_ids())
        except ValueError:
            # No groups known yet: max() on an empty iterable raises.
            curr_max_group = None
        if curr_max_group is not None and curr_max_group > max_group:
            self.__node.meta_session.write(mastermind_max_group_key,
                                           str(curr_max_group))
    except Exception as e:
        self.__logging.error("Error while loading node stats: %s\n%s" %
                             (str(e), traceback.format_exc()))
    finally:
        # Reschedule unconditionally so loading keeps running.
        self.__tq.add_task_in("load_nodes",
                              get_config_value("nodes_reload_period", 60),
                              self.loadNodes)
        self.__nodeUpdateTimestamps = (
            self.__nodeUpdateTimestamps[1:] + (time.time(),))
        bla.setConfigValue("dynamic_too_old_age",
                           time.time() - self.__nodeUpdateTimestamps[0])
def node_statistics_update(self):
    """Collect node monitor statistics under the cluster update lock.

    Refreshes monitor stats, then compares the largest group id in storage
    against the value persisted under keys.MASTERMIND_MAX_GROUP_KEY and
    writes the larger one back.  Always reschedules itself and refreshes
    the dynamic "too old age" setting.
    """
    # Assign before the try block: the finally clause logs elapsed time
    # and would raise NameError if lock acquisition failed first.
    start_ts = time.time()
    try:
        with self.__cluster_update_lock:
            logger.info(
                'Cluster updating: node statistics collecting started')
            self.monitor_stats()

            try:
                max_group = int(
                    self.__node.meta_session.read_data(
                        keys.MASTERMIND_MAX_GROUP_KEY).get()[0].data)
            except Exception as e:
                logger.error(
                    'Failed to read max group number: {0}'.format(e))
                max_group = 0

            if not len(storage.groups):
                # Nothing to compare; the finally clause still reschedules.
                # warning() instead of the deprecated warn() alias.
                logger.warning('No groups found in storage')
                return

            curr_max_group = max((g.group_id for g in storage.groups))
            logger.info(
                'Current max group in storage: {0}'.format(curr_max_group))
            if curr_max_group > max_group:
                logger.info('Updating storage max group to {0}'.format(
                    curr_max_group))
                self.__node.meta_session.write_data(
                    keys.MASTERMIND_MAX_GROUP_KEY,
                    str(curr_max_group)).get()
    except Exception as e:
        logger.error('Failed to fetch node statistics: {0}\n{1}'.format(
            e, traceback.format_exc()))
    finally:
        logger.info(
            'Cluster updating: node statistics collecting finished, time: {0:.3f}'
            .format(time.time() - start_ts))
        reload_period = config.get('nodes_reload_period', 60)
        self.__tq.add_task_in('node_statistics_update', reload_period,
                              self.node_statistics_update)
        self.__nodeUpdateTimestamps = self.__nodeUpdateTimestamps[1:] + (
            time.time(), )
        bla.setConfigValue(
            "dynamic_too_old_age",
            max(time.time() - self.__nodeUpdateTimestamps[0],
                reload_period * 3))