def test_check_database_state(self):
    """Verify check_database_state starts pollers for new jobs and stops killed ones."""
    # Wire a PollerManager up to an in-memory mock database.
    mock_db = MockDB()
    manager = PollerManager("pm1")
    manager.db = mock_db

    first_job = Job("j1", 1, [Host("hostname", "user", "pass", "unique")])
    second_job = Job("j2", 1, [Host("hostname", "user", "pass", "unique")])

    # A freshly created job should appear as a running (not stopped) poller.
    mock_db.create_job(first_job, manager.spawner_id)
    manager.check_database_state()
    self.assertIn("j1", manager.pollers)
    self.assertIsNotNone(manager.pollers["j1"])
    self.assertFalse(manager.pollers["j1"].stopped)

    # A second job should be tracked independently of the first.
    mock_db.create_job(second_job, manager.spawner_id)
    manager.check_database_state()
    self.assertIn("j2", manager.pollers)
    self.assertIsNotNone(manager.pollers["j2"])
    self.assertFalse(manager.pollers["j2"].stopped)

    # A kill command should stop the poller and fully tear the job down.
    mock_db.kill_job("j1")
    manager.check_database_state()
    self.assertTrue(manager.pollers["j1"].stopped)
    self.assertTrue(mock_db.check_if_job_is_fully_dead("j1"))
def register(self, channel: Channel, job: Job) -> bool:
    """Register *job* on *channel* and enqueue it as waiting.

    :param channel: channel that should own the job
    :param job: job to register
    :return: True when the job was newly registered and queued,
             False when the channel already tracks a job with the same uid.
    """
    if not channel.addJob(job):
        self.LogManager.debug('[QueueManager:'+str(channel.name)+'] job: ' + str(job.loguid()) + ' already registered')
        return False
    self.LogManager.debug('[QueueManager:'+str(channel.name)+'] job: ' + str(job.loguid())+ ' registered successfully')
    channel.pushWaitingJob(job)
    # BUG FIX: the success path previously fell through and implicitly
    # returned None, contradicting the declared -> bool return type.
    return True
def onMessage(self, client, data, mqttMessage):
    """Dispatch an incoming MQTT message to the matching queue action.

    Topic layout (from the split below): <root>/<channelName>/<actionName>.
    Unrecognized actions are ignored silently.
    """
    topic = mqttMessage.topic
    msg = str(mqttMessage.payload.decode("utf-8", "ignore"))
    try:
        msgJson = json.loads(msg)
    except ValueError:
        # BUG FIX: was a bare `except:` that swallowed every exception
        # (including KeyboardInterrupt). json.loads on a str raises
        # JSONDecodeError, a ValueError subclass. Non-JSON payloads
        # (e.g. a bare job uid) fall back to an empty dict.
        msgJson = dict()
    [root, channelName, actionName] = topic.split('/', 2)
    if actionName in ['register', 'finished', 'aborted', 'concurrency/set', 'infos']:
        self.LogManager.debug('[MqttManager] ' + topic + ' => ' + msg)
    else:
        # Unknown action: nothing to do.
        return
    channel = self.QueueManager.checkChannel(channelName)
    if actionName == 'register':
        job = Job(uid=str(msgJson['uid']), ttl=int(msgJson['timeout']))
        self.QueueManager.register(channel, job)
    elif actionName == 'finished':
        self.QueueManager.finished(channel, jobUid=msg)
    elif actionName == 'aborted':
        self.QueueManager.aborted(channel, jobUid=msg)
    elif actionName == 'concurrency/set':
        self.LogManager.debug(
            "[Channel] concurrency set to {}".format(msg))
        channel.maxConcurrentJobs = int(msg)
    # Every handled action (including plain 'infos') ends with a state broadcast.
    self.QueueManager.infos(channel)
def hi(update, context):
    """Telegram handler: log the chat and remember it if seen for the first time.

    :param update: incoming telegram update (provides effective_chat)
    :param context: handler context (unused)
    """
    chatId = update.effective_chat.id
    # Renamed from `type`, which shadowed the builtin.
    chatType = update.effective_chat.type
    title = update.effective_chat.title
    print("ChatId: {} - type: {} - title: {}".format(chatId, chatType, title))
    # `in dict` instead of `in dict.keys()` — same semantics, idiomatic.
    if chatId not in groupChatId:
        # NOTE(review): the meaning of the trailing "" and 20 arguments is not
        # visible here — confirm against Job's constructor signature.
        groupChatId[chatId] = Job(chatId, chatType, title, "", 20)
        saveUserDict()
def addJob(self, job: Job) -> bool:
    """Adopt *job* into this channel, rejecting duplicates by uid.

    :param job: job to attach
    :return: True when the job was adopted, False when a job with the
             same uid is already tracked.
    """
    if job.uid not in self._jobs:
        job.channel = self
        self._jobs[job.uid] = job
        return True
    return False
def loadJobs():
    """Populate the currentJobs store from ./db/jobs.csv and advance the id counter.

    Always returns True.
    """
    with open("./db/jobs.csv", newline='') as csvfile:
        for record in csv.reader(csvfile):
            job = Job.fromRow(record)
            currentJobs[job.jobId] = job
            # Keep the counter one past the highest job id seen so far.
            idCounter[0] = max(idCounter[0], job.jobId + 1)
    return True
def get_job_deserialization(job_document):
    """Build a Job from its dict/document representation.

    :param job_document: mapping with 'job_id', 'interval', 'host_list'
                         and 'active' keys
    :return: the deserialized Job instance
    """
    # BUG FIX: the original executed `Job = Job()` first, which rebound the
    # local name Job to an instance (shadowing the class) and broke the real
    # Job(...) construction below.
    job_id = job_document['job_id']
    interval = job_document['interval']
    host_list = get_host_list_deserialization(job_document['host_list'])
    active = job_document['active']
    return Job(job_id=job_id, interval=interval, host_list=host_list, active=active)
def test_create_job(self):
    """Creating a job should succeed and be assignable to a spawner."""
    # The job id and container/host ids already exist at this point;
    # this test only hands them to the database layer.
    job = Job(self.job_id, 30, self.host_list_a)

    # Creation should report success.
    created = self.db_obj.create_job(job, self.spawner_a_id)
    self.assertTrue(created)

    self.db_obj.check_job_assignments(self.spawner_a_id)

    # Tear the job down so it does not leak into other tests.
    self.db_obj.kill_job(self.job_id)
def start_computation(self, user_params):
    """
    This method starts the computation with the defined parameters.

    :param model.UserParameters.UserParameters user_params: UserParameters
    :rtype : UUID
    """
    job_id = uuid.uuid4()
    # Start every derived task, collecting them for the job record.
    started_tasks = []
    for task in self._convert_user_params_to_tasks(user_params, job_id):
        # check
        self._start_task(task)
        started_tasks.append(task)
    self._jobs[str(job_id)] = Job(job_id, started_tasks, [], user_params, time.time())
    # TODO: we don't want workers to be started here anymore
    #self._start_workers(job)
    return job_id
from model.PollerManager import PollerManager
from model.JobManager import JobManager
from model.Job import Host
from model.Job import Job
import sys

default_interval = 5
job_id = 1

# Poll interval comes from argv[1] when supplied; otherwise fall back to the
# default and tell the user.
if len(sys.argv) > 1:
    interval = float(sys.argv[1])
else:
    interval = default_interval
    print("No interval given - set to default: " + str(default_interval) + "s")

# NOTE(review): credentials are hard-coded here; they should live in a config
# file or secret store, not in source control.
host_list = [
    Host("eb2tvx02drac.csc.ncsu.edu", "admin", "/*hoRV7or2C", "dell"),
    Host("eb2-2214-sd01ipmi.csc.ncsu.edu", "admin", "sdteam18", "cisco"),
]

job = Job(job_id, interval, host_list)
poller_manager = PollerManager("placeholder")
print("PollerManager created - " + str(poller_manager))
poller_manager.start_job(job)
def kill(self, job: Job, killedBy: str = None):
    """Remove *job* from its channel's working and registered sets, then log it.

    :param job: the job to terminate
    :param killedBy: optional label of who/what requested the kill
    """
    wasWorking = job.channel.removeWorkingJob(job=job)
    wasRegistered = job.channel.removeJob(uid=job.uid)
    # BUG FIX: killedBy defaults to None, and None.upper() raised
    # AttributeError whenever the caller omitted it; fall back to a
    # placeholder label instead of crashing.
    killer = killedBy.upper() if killedBy else 'UNKNOWN'
    self.LogManager.debug(
        '[QueueManager:' + str(job.channel.name) + '] job: ' + job.loguid()
        + ' killed by: ' + killer
        + ' (wasWorking: ' + ('yes' if wasWorking else 'no')
        + ', wasRegistered: ' + ('yes' if wasRegistered else 'no') + ')')
def delegate(self, job_id, interval, host_list, spawner):
    """Assign a job's hosts to *spawner* and persist the job in the database."""
    # Track the assignment on the spawner object itself.
    spawner.jobs[job_id] = host_list
    spawner.total_hosts += len(host_list)
    # Record the job in the database under this spawner's id.
    new_job = Job(job_id, interval, host_list)
    self.db.create_job(new_job, spawner.spawner_id)
def createNewJob(address, note):
    """Create an 'Unassigned' job at the next free id and persist the store."""
    job = Job(idCounter[0], address, note, 'Unassigned', None)
    currentJobs[job.jobId] = job
    saveJobs()
    # Advance the shared id counter for the next job.
    idCounter[0] += 1