def makeMap(self, records):
    """Build the work-queue maps from raw DB records.

    Populates:
      * self.workQueueGlobalMap -- every queue (active or not) keyed by queue_id
      * self.workQueueMap -- active normal queues keyed by VO then queue_type,
        each list sorted by queue_order (stable: ties keep insertion order)

    Sub queues (partitionID set to a parent queue_id) are not listed in
    workQueueMap directly; they are attached to their parent queue via
    addSubQueue. A sub queue whose parent is missing or inactive is dropped.

    :param records: iterable of raw rows accepted by WorkQueue.pack
    :return: None
    """
    self.workQueueMap = {}
    self.workQueueGlobalMap = {}
    subQueuesMap = {}
    # loop over all records
    for record in records:
        # pack the raw record into a WorkQueue object
        workQueue = WorkQueue()
        workQueue.pack(record)
        # every queue goes into the global map, even inactive ones
        self.workQueueGlobalMap[workQueue.queue_id] = workQueue
        # skip inactive queues for the per-VO map
        if not workQueue.isActive():
            continue
        # normal queue or sub queue (partitionID points at the parent queue)
        if workQueue.partitionID in [None, -1]:
            targetMap = self.workQueueMap
        else:
            targetMap = subQueuesMap
        # create the VO / type levels lazily and register the queue
        targetMap.setdefault(workQueue.VO, {}).setdefault(
            workQueue.queue_type, []).append(workQueue)
    # attach sub queues to their parent (partition) queues.
    # .get defaults replace the old bare except, so only a genuinely
    # missing parent VO/type is skipped -- other errors now surface.
    for VO, typeQueueMap in subQueuesMap.items():
        for queue_type, subQueues in typeQueueMap.items():
            for subQueue in subQueues:
                for normalQueue in self.workQueueMap.get(VO, {}).get(queue_type, []):
                    if normalQueue.queue_id == subQueue.partitionID:
                        normalQueue.addSubQueue(subQueue)
                        break
    # sort each queue list by queue_order; sorted() is stable, so queues
    # sharing an order value keep their original relative order, exactly
    # as the old bucket-by-order implementation did
    for VO, typeQueueMap in self.workQueueMap.items():
        for queueType, queueList in typeQueueMap.items():
            typeQueueMap[queueType] = sorted(
                queueList, key=lambda wq: wq.queue_order)
    # return
    return
def getSqlQuery(self):
    """
    Generates the SQL to get all work queues
    """
    columns = WorkQueue.column_names()
    sql = "SELECT {0} FROM ATLAS_PANDA.JEDI_Work_Queue".format(columns)
    return sql
def getSqlQuery(self):
    """Generates the SQL to get all work queues"""
    return "SELECT {0} FROM ATLAS_PANDA.JEDI_Work_Queue".format(
        WorkQueue.column_names())
def getSqlQuery(self):
    # build the SELECT statement over all work-queue columns
    column_list = WorkQueue.columnNames()
    return "SELECT %s FROM ATLAS_PANDA.JEDI_Work_Queue" % column_list
def makeMap(self, work_queues, global_leave_shares):
    """
    Creates the mapping with work queues and global shares

    Work queues are grouped into self.work_queue_map by VO and queue type
    and each list is sorted by queue_order; global leave shares are then
    appended after the sorted work queues (they are not re-sorted, matching
    the original behaviour). All entries are also registered in
    self.work_queue_global_dic_by_name and self.work_queue_global_dic_by_id.

    :param work_queues: work queue records, each accepted by WorkQueue.pack
    :param global_leave_shares: global leave shares, each accepted by
                                WorkQueue.pack_gs
    :return: None
    """
    # 1. add all workqueues to the map
    for wq in work_queues:
        # pack
        work_queue = WorkQueue()
        work_queue.pack(wq)
        # skip inactive queues
        if not work_queue.isActive():
            continue
        # create the VO / type levels lazily and register the queue.
        # setdefault replaces the old mix of `not in` and the
        # Python-2-only dict.has_key
        vo_map = self.work_queue_map.setdefault(work_queue.VO, {})
        vo_map.setdefault(work_queue.queue_type, []).append(work_queue)
        self.work_queue_global_dic_by_name[work_queue.queue_name] = work_queue
        self.work_queue_global_dic_by_id[work_queue.queue_id] = work_queue
    # sort each queue list by order; sorted() is stable, so queues sharing
    # an order value keep their insertion order, exactly like the old
    # bucket-by-order code (and it works on Python 3 dict views, where
    # .keys().sort() does not)
    for vo in self.work_queue_map:
        for queue_type in self.work_queue_map[vo]:
            self.work_queue_map[vo][queue_type] = sorted(
                self.work_queue_map[vo][queue_type],
                key=lambda entry: entry.queue_order)
    # 2. add all the global shares
    for gs in global_leave_shares:
        work_queue_gs = WorkQueue()
        work_queue_gs.pack_gs(gs)
        # global shares without an explicit VO default to atlas
        vo = 'atlas' if work_queue_gs.VO is None else work_queue_gs.VO
        vo_map = self.work_queue_map.setdefault(vo, {})
        vo_map.setdefault(work_queue_gs.queue_type, []).append(work_queue_gs)
        self.work_queue_global_dic_by_name[work_queue_gs.queue_name] = work_queue_gs
        self.work_queue_global_dic_by_id[work_queue_gs.queue_id] = work_queue_gs
    # return
    return
def getSqlQuery(self):
    """Return the SQL statement selecting every work-queue row."""
    return "SELECT %s FROM ATLAS_PANDA.JEDI_Work_Queue" % WorkQueue.columnNames()
# Load the RootConfig.yaml rootConfig = LoadRootConfig(prefixDir) # Open the PickleDb for the passphrases (encrypted with locking passphrase) passPhrases = ManagePassPhrases(rootConfig=rootConfig) # Prompt for the Locking Passphrase # validate the passphrase against the pickledb root entry (if any, or write new) passPhrases.AskForMasterLockPassphrase() # Prepare the work list workQueue = WorkQueue(rootConfig=rootConfig, passPhrases=passPhrases) # Read the current certificate manifest workQueue.AssimilateExistingCerts() # Create a work entry for each cert which must be created, including root CAs. # 1. Compute quantums and compare to existing certs. # Look at expire dates and see if new certs need to be created # 2. Look at the Load and see how many certs must be created for each type # and possibily more certs to handle higher load workQueue.DiscoverAllNewWork() # Loop through all previous certs # 1. Find their passphrase and decrypt them
def makeMap(self, work_queues, global_leave_shares):
    """
    Creates the mapping with work queues and global shares

    Active work queues are grouped into self.work_queue_map[VO][queue_type]
    and each list is sorted by queue_order (stable sort: ties keep their
    insertion order). Global leave shares are appended afterwards without
    re-sorting, preserving the original behaviour. Every added entry is
    also indexed in self.work_queue_global_dic_by_name and
    self.work_queue_global_dic_by_id.

    :param work_queues: work queue records, each accepted by WorkQueue.pack
    :param global_leave_shares: global leave shares, each accepted by
                                WorkQueue.pack_gs
    :return: None
    """
    # 1. add all workqueues to the map
    for wq in work_queues:
        # pack the raw record
        work_queue = WorkQueue()
        work_queue.pack(wq)
        # skip inactive queues
        if not work_queue.isActive():
            continue
        # add VO / type levels on demand; setdefault replaces the
        # Python-2-only dict.has_key checks
        type_map = self.work_queue_map.setdefault(work_queue.VO, {})
        type_map.setdefault(work_queue.queue_type, []).append(work_queue)
        self.work_queue_global_dic_by_name[work_queue.queue_name] = work_queue
        self.work_queue_global_dic_by_id[work_queue.queue_id] = work_queue
    # sort each queue list by queue_order. A stable sorted() reproduces
    # the old bucket-by-order result and avoids .keys().sort(), which
    # fails on Python 3 dict views. `queue_type` also no longer shadows
    # the builtin `type`.
    for vo, type_map in self.work_queue_map.items():
        for queue_type, queue_list in type_map.items():
            type_map[queue_type] = sorted(
                queue_list, key=lambda item: item.queue_order)
    # 2. add all the global shares
    for gs in global_leave_shares:
        work_queue_gs = WorkQueue()
        work_queue_gs.pack_gs(gs)
        # global shares without an explicit VO default to atlas
        if work_queue_gs.VO is None:
            vo = 'atlas'
        else:
            vo = work_queue_gs.VO
        type_map = self.work_queue_map.setdefault(vo, {})
        type_map.setdefault(work_queue_gs.queue_type, []).append(work_queue_gs)
        self.work_queue_global_dic_by_name[work_queue_gs.queue_name] = work_queue_gs
        self.work_queue_global_dic_by_id[work_queue_gs.queue_id] = work_queue_gs
    # return
    return