def __setstate__(self, state):
    """Restore this system component from a pickled *state* dict.

    Rebuilds the live node bookkeeping from scratch (via configure()),
    then reconciles the persisted queue assignments and down-node set
    against the nodes that actually exist now.
    """
    Component.__setstate__(self, state)
    # Live node data is rebuilt, not restored, so stale hosts never survive.
    self.all_nodes = set()
    self.node_order = {}
    self.configure()
    self.queue_assignments = state.get('queue_assignments', {})
    empty_queues = []
    # Never allow scheduling onto nodes that no longer exist.
    if not self.queue_assignments:
        # Fresh state: every known node lands in the default queue.
        self.queue_assignments["default"] = set(self.all_nodes)
    else:
        # Intersect each queue's saved membership with the current node set;
        # queues left with no nodes are collected and dropped afterwards.
        for queue, saved_nodes in self.queue_assignments.iteritems():
            valid_nodes = self.all_nodes & set(saved_nodes)
            if not valid_nodes:
                empty_queues.append(queue)
            self.queue_assignments[queue] = valid_nodes
        for queue in empty_queues:
            del self.queue_assignments[queue]
    # Only remember down-markings for nodes that still exist.
    self.down_nodes = self.all_nodes & set(state.get('down_nodes', set()))
    self.process_groups = ProcessGroupDict()
    self.running_nodes = set()
    self.alloc_only_nodes = {}  # nodename:starttime
    # NOTE(review): only initializes when the key is absent; presumably
    # Component.__setstate__ restores it otherwise — confirm.
    if "cleaning_processes" not in state:
        self.cleaning_processes = []
    self.cleaning_host_count = {}  # jobid:count
    self.locations_by_jobid = {}  # jobid:[locations]
    self.jobid_to_user = {}  # jobid:username
    self.alloc_timeout = int(
        get_orcm_system_config("allocation_timeout", 300))
    self.logger.info("allocation timeout set to %d seconds." % self.alloc_timeout)
def __setstate__(self, state):
    """Restore the database-writer component from a pickled *state* dict.

    Re-reads the queue-size / overflow-file configuration and decides
    whether message-queue overflow handling is enabled.  The connection
    itself is not restored; `self.connected` always starts False.
    """
    Component.__setstate__(self, state)
    self.msg_queue = state['msg_queue']
    self.connected = False
    self.decoder = LogMessageDecoder()
    self.clearing_overflow = False
    self.overflow_filename = None
    self.overflow_file = None
    # Config value arrives as a string; <= 0 means "unlimited".
    self.max_queued = int(get_cdbwriter_config('max_queued_msgs', '-1'))
    if self.max_queued <= 0:
        logger.info("message queue set to unlimited.")
        self.max_queued = None
    else:
        self.overflow_filename = get_cdbwriter_config('overflow_file', None)
    # A bounded queue is only usable with an overflow file to spill into;
    # without one, fall back to an unlimited queue.
    # (idiom fix: `is None` instead of `== None`)
    if self.max_queued and (self.overflow_filename is None):
        logger.warning("No file given to catch maximum messages. Setting queue size to unlimited.")
        self.max_queued = None
    # Only honor a persisted overflow flag when bounding is active.
    # (idiom fix: membership test instead of deprecated dict.has_key)
    if 'overflow' in state and self.max_queued:
        self.overflow = state['overflow']
    else:
        self.overflow = False
def __setstate__(self, state):
    """Restore the database-writer component from a pickled *state* dict.

    Reloads queue-size and overflow-file configuration; connection state
    is never persisted, so the writer always comes back disconnected.
    """
    Component.__setstate__(self, state)
    self.msg_queue = state['msg_queue']
    self.connected = False
    self.decoder = LogMessageDecoder()
    self.clearing_overflow = False
    self.overflow_file = None
    self.overflow_filename = None
    # "-1" default => unlimited queue; any value <= 0 is treated the same.
    self.max_queued = int(get_cdbwriter_config('max_queued_msgs', '-1'))
    if self.max_queued > 0:
        self.overflow_filename = get_cdbwriter_config('overflow_file', None)
        if self.overflow_filename is None:
            # A bounded queue needs somewhere to spill; revert to unlimited.
            logger.warning("No file given to catch maximum messages. Setting queue size to unlimited.")
            self.max_queued = None
    else:
        logger.info("message queue set to unlimited.")
        self.max_queued = None
    # The persisted overflow flag only matters when the queue is bounded.
    if self.max_queued and 'overflow' in state:
        self.overflow = state['overflow']
    else:
        self.overflow = False
def __setstate__(self, state):
    """Restore the scheduler component from a pickled *state* dict.

    Re-seeds the reservation / cycle ID generators from the persisted
    counters and republishes them through the module-level globals, then
    rebuilds the transient queue/job caches and hands any persisted
    message-queue data to the shared dbwriter.
    """
    Component.__setstate__(self, state)
    self.reservations = state['reservations']
    # Older saved states may predate the 'active' flag; default to on.
    self.active = state.get('active', True)
    self.id_gen = IncrID()
    self.id_gen.set(state['next_res_id'])
    global bgsched_id_gen
    bgsched_id_gen = self.id_gen
    self.cycle_id_gen = IncrID()
    self.cycle_id_gen.set(state['next_cycle_id'])
    global bgsched_cycle_id_gen
    bgsched_cycle_id_gen = self.cycle_id_gen
    # Transient caches — rebuilt fresh rather than restored.
    self.queues = QueueDict()
    self.jobs = JobDict()
    self.started_jobs = {}
    self.sync_state = Cobalt.Util.FailureMode("Foreign Data Sync")
    self.get_current_time = time.time
    # Hand persisted writer data back to the shared dbwriter, but only
    # honor the overflow flag when the writer's queue is bounded.
    if 'msg_queue' in state:
        dbwriter.msg_queue = state['msg_queue']
    if 'overflow' in state and dbwriter.max_queued is not None:
        dbwriter.overflow = state['overflow']
def __setstate__(self, state):
    """Restore the scheduler component from a pickled *state* dict.

    Re-seeds the reservation / cycle ID generators from the persisted
    counters (publishing them via module globals), rebuilds transient
    queue/job caches, and restores persisted dbwriter queue data.
    """
    Component.__setstate__(self, state)
    self.reservations = state['reservations']
    # Fix: test membership directly on the dict instead of materializing
    # state.keys() into a list for every restore.
    if 'active' in state:
        self.active = state['active']
    else:
        self.active = True
    self.id_gen = IncrID()
    self.id_gen.set(state['next_res_id'])
    global bgsched_id_gen
    bgsched_id_gen = self.id_gen
    self.cycle_id_gen = IncrID()
    self.cycle_id_gen.set(state['next_cycle_id'])
    global bgsched_cycle_id_gen
    bgsched_cycle_id_gen = self.cycle_id_gen
    # Transient caches — rebuilt fresh rather than restored.
    self.queues = QueueDict()
    self.jobs = JobDict()
    self.started_jobs = {}
    self.sync_state = Cobalt.Util.FailureMode("Foreign Data Sync")
    self.get_current_time = time.time
    # Idiom fixes: `in` instead of deprecated dict.has_key, and
    # `is not None` instead of `!= None`.
    if 'msg_queue' in state:
        dbwriter.msg_queue = state['msg_queue']
    # Only honor the overflow flag when the writer's queue is bounded.
    if 'overflow' in state and (dbwriter.max_queued is not None):
        dbwriter.overflow = state['overflow']
def __setstate__(self, state):
    """Restore the cluster system component from a pickled *state* dict.

    Restores queue assignments and down-node markings directly from
    state, then rebuilds the live node tables from the hostfile.
    """
    Component.__setstate__(self, state)
    self.queue_assignments = state["queue_assignments"]
    self.down_nodes = state["down_nodes"]
    self.process_groups = ProcessGroupDict()
    self.all_nodes = set()
    self.running_nodes = set()
    self.node_order = {}
    try:
        self.configure(cluster_hostfile)
    except Exception:
        # Fix: narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.  Startup continues with an
        # empty node set, matching the original best-effort behavior.
        self.logger.error("unable to load hostfile")
    self.alloc_only_nodes = {}  # nodename:starttime
    # NOTE(review): only initializes when the key is absent; presumably
    # Component.__setstate__ restores it otherwise — confirm.
    if "cleaning_processes" not in state:
        self.cleaning_processes = []
    self.cleaning_host_count = {}  # jobid:count
    self.locations_by_jobid = {}  # jobid:[locations]
    self.jobid_to_user = {}  # jobid:username
    self.alloc_timeout = int(get_cluster_system_config("allocation_timeout", 300))
    self.logger.info("allocation timeout set to %d seconds." % self.alloc_timeout)
def __setstate__(self, state):
    """Restore from a pickled *state* dict.

    Pure delegation: the base Component restore is sufficient; this
    subclass keeps no extra attributes that need rebuilding.
    """
    Component.__setstate__(self, state)