def __init__(self, nodeSet, **kargs):
    self.nodes = nodeSet
    # Allow callers to override the node class via the node_cls keyword.
    node_cls = kargs.get('node_cls', Node)
    self.nodes.node_cls = node_cls
    # Open/closed lists for the search over nodes.
    self.open_list = NodeList()
    self.close_list = NodeList()
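# A minimal sketch (hypothetical, inferred from attribute access in the
# snippet above and its neighbours) of the container shapes this
# constructor assumes:
class Node:
    """Stand-in for the search node class used as the default node_cls."""
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

class NodeList:
    """Thin list wrapper; other snippets both append() to it and read .items."""
    def __init__(self):
        self.items = []
    def append(self, node):
        self.items.append(node)

class NodeSet:
    """Node container exposing a swappable node class via node_cls."""
    node_cls = Node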
def __init__(self):
    self.status_lock = Lock()
    # Time interval, in seconds, between updates of Pod statistics.
    self.time_interval = TIME_INTERVAL
    # Load cluster credentials from the local kind configuration file.
    config.load_kube_config(config_file=os.path.join(
        os.path.dirname(__file__), '../kind-config'))
    self.v1 = client.CoreV1Api()
    self.all_pods = PodList()
    self.all_nodes = NodeList()
    self.pods_not_to_garbage = []
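# For reference, the imports this initializer relies on. The kubernetes
# calls (config.load_kube_config, client.CoreV1Api) are the official
# Python client API; TIME_INTERVAL, PodList and NodeList are assumed to
# be project-local names defined elsewhere.
import os
from threading import Lock

from kubernetes import client, config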
def update_nodes(self):
    """
    Requests the current Nodes in the cluster from the API, then fills in
    the remaining attributes of each Node.

    :return: None
    """
    # Use the lock as a context manager so it is released even on error.
    with self.status_lock:
        self.all_nodes = NodeList()
        print('Updating nodes')
        for node_ in self.v1.list_node().items:
            node = Node(node_.metadata, node_.spec, node_.status)
            node.update_node(self.all_pods)
            self.all_nodes.items.append(node)
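# One plausible way to drive update_nodes() periodically: a minimal
# sketch (start_monitoring is a hypothetical helper, not from the source)
# that refreshes node state every time_interval seconds on a daemon thread.
import threading
import time

def start_monitoring(monitor):
    def loop():
        while True:
            monitor.update_nodes()
            time.sleep(monitor.time_interval)
    thread = threading.Thread(target=loop, daemon=True)
    thread.start()
    return thread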
def filter_nodes(self, pod):
    """
    Filter the Nodes in self.monitor.all_nodes that can run the selected Pod.

    :param pod.Pod pod: Pod to be scheduled
    :return node.NodeList: list of Nodes that satisfy the Pod's requirements
    """
    return_node_list = NodeList()
    if pod.spec.node_name is not None:
        # The Pod is pinned to a specific node; match it by name.
        for node in self.monitor.all_nodes.items:
            if pod.spec.node_name == node.metadata.name:
                return_node_list.items.append(node)
    else:
        print('All nodes can be used for Pod %s' % pod.metadata.name)
        for node in self.monitor.all_nodes.items:
            # TODO check labels here and decide if the Node can run the Pod
            return_node_list.items.append(node)
    return return_node_list
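# A hedged illustration of the two branches above, using stand-in stubs
# (SimpleNamespace) for the real Pod model: a Pod with spec.node_name set
# matches exactly one node, while an unpinned Pod currently passes every
# node through (labels are not checked yet, per the TODO).
from types import SimpleNamespace

pinned_pod = SimpleNamespace(
    metadata=SimpleNamespace(name='web-0'),
    spec=SimpleNamespace(node_name='worker-1'))
free_pod = SimpleNamespace(
    metadata=SimpleNamespace(name='web-1'),
    spec=SimpleNamespace(node_name=None))

# scheduler.filter_nodes(pinned_pod) -> only the node named 'worker-1'
# scheduler.filter_nodes(free_pod)   -> every node in monitor.all_nodes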
def _schedulers_pipeline(self):
    self._purge_served_requests()
    now = cpyutils.eventloop.now()
    if (now - self._timestamp_mark) > _CONFIGURATION_CLUES.LOGGER_MARK:
        self._timestamp_mark = now
        _LOGGER.debug("--------- LOGGING MARK (everything continues) @ %f" % now)
    if (self._lrms_nodelist is None) and (self._lrms_joblist is None):
        # No monitoring information yet; wait until the next call.
        _LOGGER.debug("there is no monitoring info up to now... skipping scheduling")
        return
    nodelist = NodeList(self._lrms_nodelist)
    candidates_on = {}
    candidates_off = []
    monitoring_info = MonitoringInfo(nodelist, self._timestamp_nodelist,
                                     self._lrms_joblist, self._timestamp_joblist)
    # Let every scheduler vote on which nodes to power on or off.
    for scheduler in self._schedulers:
        if not scheduler.schedule(self._requests_queue, monitoring_info,
                                  candidates_on, candidates_off):
            _LOGGER.error("failed to schedule with scheduler %s" % str(scheduler))
    if len(candidates_off) > 0:
        _LOGGER.info("nodes %s are considered to be powered off" % str(candidates_off))
        for n_id in candidates_off:
            self.power_off(n_id)
    if len(candidates_on) > 0:
        _LOGGER.info("nodes %s are considered to be powered on" % str(candidates_on.keys()))
        for n_id in candidates_on:
            self.power_on(n_id)
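# The pipeline above fixes the scheduler contract only implicitly. A
# minimal no-op scheduler satisfying that call signature might look like
# this (a sketch, not the project's actual scheduler base class):
class NoopScheduler:
    def schedule(self, requests_queue, monitoring_info,
                 candidates_on, candidates_off):
        # A real scheduler would inspect monitoring_info and the queued
        # requests, then add node ids to candidates_on (a dict) or
        # candidates_off (a list). Returning False is reported as an error.
        return True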
def get_monitoring_info(self):
    return MonitoringInfo(NodeList(self._lrms_nodelist), self._timestamp_nodelist,
                          self._lrms_joblist, self._timestamp_joblist)
def get_nodelist(self):
    if self._lrms_nodelist is not None:
        return NodeList(self._lrms_nodelist)
    return NodeList({})
def add_json(self, _json):
    new_node = self.make_node_from_json(_json, self.root_node)
    new_node_list = NodeList()
    new_node_list.append(new_node)
    self.merge_node_list_to_node(self.root_node, new_node_list)
def get_nodelist(self):
    if self._lrms_nodelist is not None:
        return NodeList(self._lrms_nodelist)
    # Same as the earlier variant, except the empty fallback preserves
    # insertion order via an OrderedDict.
    return NodeList(collections.OrderedDict())