def test_create_and_cancel_job_enqueue_dependents_in_registry(self):
    """Ensure job.cancel() works properly with enqueue_dependents=True and when the job is in a registry"""
    queue = Queue(connection=self.testconn)
    # The dependency deliberately fails so it lands in FailedJobRegistry.
    dependency = queue.enqueue(fixtures.raise_exc)
    dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)
    # Only the dependency is runnable; the dependent sits in the deferred registry.
    self.assertEqual(1, len(queue.get_jobs()))
    self.assertEqual(1, len(queue.deferred_job_registry))
    w = Worker([queue])
    w.work(burst=True, max_jobs=1)
    dependency.refresh()
    dependent.refresh()
    # Dependency ran and failed: queue drained, dependent still deferred,
    # one entry in the failed registry.
    self.assertEqual(0, len(queue.get_jobs()))
    self.assertEqual(1, len(queue.deferred_job_registry))
    self.assertEqual(1, len(queue.failed_job_registry))
    # Cancel the failed dependency while promoting its dependents.
    cancel_job(dependency.id, enqueue_dependents=True)
    dependency.refresh()
    dependent.refresh()
    # The dependent moved onto the queue; the dependency moved from the
    # failed registry into the canceled registry.
    self.assertEqual(1, len(queue.get_jobs()))
    self.assertEqual(0, len(queue.deferred_job_registry))
    self.assertEqual(0, len(queue.failed_job_registry))
    self.assertEqual(1, len(queue.canceled_job_registry))
    registry = CanceledJobRegistry(connection=self.testconn, queue=queue)
    self.assertIn(dependency, registry)
    self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
    self.assertNotIn(dependency, queue.failed_job_registry)
    self.assertIn(dependent, queue.get_jobs())
    self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
    # If job is deleted, it's also removed from CanceledJobRegistry
    dependency.delete()
    self.assertNotIn(dependency, registry)
def listen_for_jobs():
    """Block forever on the SUB socket, enqueueing every message received.

    Each message is expected to be "<topic> <job_data>" with the payload
    after the first space; relies on the module-level ``socket_sub`` socket
    and ``q`` queue.
    """
    while True:
        string = socket_sub.recv().decode("UTF-8")
        # Split only on the first space: topic, then the raw job payload.
        topic, job_data = string.split(" ", 1)
        print("Job arrived: ", topic, job_data)
        q.enqueue(job_data)


# NOTE(review): `thread` is not defined anywhere in this chunk -- presumably
# a worker entry point defined elsewhere in the file. Confirm the intended
# target callable and that it accepts ("JOB_EXECUTION_IN_PARALLEL", 0).
_thread.start_new_thread(thread, ("JOB_EXECUTION_IN_PARALLEL", 0))
def test_create_and_cancel_job_enqueue_dependents_with_pipeline(self):
    """Ensure job.cancel() works properly with enqueue_dependents=True"""
    queue = Queue(connection=self.testconn)
    dependency = queue.enqueue(fixtures.say_hello)
    dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)
    self.assertEqual(1, len(queue.get_jobs()))
    self.assertEqual(1, len(queue.deferred_job_registry))
    self.testconn.set('some:key', b'some:value')
    with self.testconn.pipeline() as pipe:
        # WATCH an unrelated key to prove cancel() cooperates with an
        # already-watching pipeline instead of executing on its own.
        pipe.watch('some:key')
        self.assertEqual(self.testconn.get('some:key'), b'some:value')
        dependency.cancel(pipeline=pipe, enqueue_dependents=True)
        pipe.set('some:key', b'some:other:value')
        # Nothing takes effect until the pipeline executes.
        pipe.execute()
    self.assertEqual(self.testconn.get('some:key'), b'some:other:value')
    # After execution the dependent was promoted onto the queue.
    self.assertEqual(1, len(queue.get_jobs()))
    self.assertEqual(0, len(queue.deferred_job_registry))
    registry = CanceledJobRegistry(connection=self.testconn, queue=queue)
    self.assertIn(dependency, registry)
    self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
    self.assertIn(dependent, queue.get_jobs())
    self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
    # If job is deleted, it's also removed from CanceledJobRegistry
    dependency.delete()
    self.assertNotIn(dependency, registry)
def enqueue_download(package, runtime_id):
    """Queue a download task for *package* under the given runtime id."""
    payload = {
        'package_id': package.id,
        'package_name': package.package_name,
        'runtime_id': runtime_id,
        'man_auto': package.man_auto,
    }
    queue.enqueue(download_queue, payload)
def test_can_enqueue_job_if_dependency_is_deleted(self):
    """A job may still be enqueued after its dependency has been reaped."""
    q = Queue(connection=self.testconn)
    # result_ttl=0 reaps the dependency as soon as the worker finishes it.
    reaped_parent = q.enqueue(fixtures.say_hello, result_ttl=0)
    Worker([q]).work(burst=True)
    self.assertTrue(q.enqueue(fixtures.say_hello, depends_on=reaped_parent))
def test_job_get_position(self):
    """get_position() reports the queue index, or None for an unenqueued job."""
    q = Queue(connection=self.testconn)
    first = q.enqueue(fixtures.say_hello)
    second = q.enqueue(fixtures.say_hello)
    unenqueued = Job(fixtures.say_hello)
    self.assertEqual(first.get_position(), 0)
    self.assertEqual(second.get_position(), 1)
    self.assertIsNone(unenqueued.get_position())
def breadth_first(self):
    """Print every node's key in level order (breadth-first traversal).

    Children are enqueued left-to-right, so all keys on one level print
    before any key on the next.
    """
    # NOTE(review): Queue.Queue(self) appears to be a project-local queue
    # constructed pre-seeded with this (root) node -- confirm against the
    # Queue module's constructor.
    queue = Queue.Queue(self)
    while not queue.isEmpty():  # idiomatic truth test instead of "== False"
        node = queue.dequeue()
        print(str(node.key))
        if node.has_lchild():
            queue.enqueue(node.lchild)
        if node.has_rchild():
            queue.enqueue(node.rchild)
def test_dependents_are_met_if_dependency_is_deleted(self):
    """Dependencies count as met once the dependency job has been reaped."""
    q = Queue(connection=self.testconn)
    # result_ttl=0 deletes the parent right after it finishes.
    parent = q.enqueue(fixtures.say_hello, result_ttl=0)
    child = q.enqueue(fixtures.say_hello, depends_on=parent)
    Worker([q]).work(burst=True, max_jobs=1)
    self.assertTrue(child.dependencies_are_met())
    self.assertEqual(child.get_status(), JobStatus.QUEUED)
def test_dependencies_are_met_at_execution_time(self):
    """A job listing several dependency ids runs once all of them finish."""
    q = Queue(connection=self.testconn)
    q.enqueue(fixtures.say_hello, job_id="A")
    q.enqueue(fixtures.say_hello, job_id="B")
    # "C" checks its own dependencies inside the worker process.
    dependent = q.enqueue(fixtures.check_dependencies_are_met, job_id="C", depends_on=["A", "B"])
    Worker([q]).work(burst=True)
    self.assertTrue(dependent.result)
def levelByLevel(self, aFile):
    '''Print the nodes of the BTree level-by-level on aFile.'''
    aFile.write("A level-by-level listing of the nodes:\n")
    queue = MyQueue()
    queue.enqueue(self.rootNode)
    while not queue.isEmpty():
        currentNode = queue.dequeue()
        aFile.write(str(currentNode))
        # A node holding k keys has k + 1 child slots; empty slots read
        # back as None and are skipped.
        for i in range(currentNode.getNumberOfKeys() + 1):
            child = self.readFrom(currentNode.child[i])
            if child is not None:  # "is not None" instead of "!= None"
                queue.enqueue(child)
def findRouteBFS(root, node):
    """Breadth-first search from *root*; return True iff *node* is reachable.

    Each vertex's ``visited`` flag is set when it is enqueued, so no vertex
    is queued twice; the graph is therefore mutated by this call.
    """
    queue = Queue()
    root.visited = True
    queue.enqueue(root)
    while not queue.isEmpty():
        current = queue.dequeue()
        if current == node:
            return True
        for child in current.children:
            if not child.visited:  # idiomatic test instead of "!= True"
                child.visited = True
                queue.enqueue(child)
    return False
def test_create_job_with_async(self):
    """Async and sync functions enqueue alike and produce equal results."""
    q = Queue(connection=self.testconn)
    coro_job = q.enqueue(fixtures.say_hello_async, job_id="async_job")
    plain_job = q.enqueue(fixtures.say_hello, job_id="sync_job")
    self.assertEqual(coro_job.id, "async_job")
    self.assertEqual(plain_job.id, "sync_job")
    result_from_async = coro_job.perform()
    result_from_sync = plain_job.perform()
    self.assertEqual(result_from_sync, result_from_async)
def test_queue_usage(self):
    """FIFO behaviour of QueueTwoStacks, including interleaved operations."""
    fifo = QueueTwoStacks()
    for item in (1, 2, 3):
        fifo.enqueue(item)
    self.assertEqual(fifo.dequeue(), 1)
    self.assertEqual(fifo.dequeue(), 2)
    # Enqueue after partial draining to exercise the stack transfer.
    fifo.enqueue(4)
    self.assertEqual(fifo.dequeue(), 3)
    self.assertEqual(fifo.dequeue(), 4)
    # Dequeuing from an empty queue must raise.
    with self.assertRaises(Exception):
        fifo.dequeue()
def test_execution_order_with_dual_dependency(self):
    """Two workers: a job depending on both slow jobs completes last."""
    queue = Queue(connection=self.testconn)
    key = 'test_job:job_order'
    # When there are no dependencies, the two fast jobs ("A" and "B") run in the order enqueued.
    job_slow_1 = queue.enqueue(fixtures.rpush, args=[key, "slow_1", True, 0.5], job_id='slow_1')
    job_slow_2 = queue.enqueue(fixtures.rpush, args=[key, "slow_2", True, 0.75], job_id='slow_2')
    job_A = queue.enqueue(fixtures.rpush, args=[key, "A", True])
    job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True])
    fixtures.burst_two_workers(queue)
    # Give both bursts time to drain (slow_2 sleeps 0.75 s).
    time.sleep(1)
    jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 3)]
    self.assertEqual(queue.count, 0)
    self.assertTrue(all(job.is_finished for job in [job_slow_1, job_slow_2, job_A, job_B]))
    self.assertEqual(jobs_completed, ["slow_1:w1", "A:w1", "B:w1", "slow_2:w2"])
    self.testconn.delete(key)
    # This time job "A" depends on two slow jobs, while job "B" depends only on the faster of
    # the two. Job "B" should be completed before job "A".
    # There is no clear requirement on which worker should take job "A", so we stay silent on that.
    job_slow_1 = queue.enqueue(fixtures.rpush, args=[key, "slow_1", True, 0.5], job_id='slow_1')
    job_slow_2 = queue.enqueue(fixtures.rpush, args=[key, "slow_2", True, 0.75], job_id='slow_2')
    job_A = queue.enqueue(fixtures.rpush, args=[key, "A", False], depends_on=['slow_1', 'slow_2'])
    job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True], depends_on=['slow_1'])
    fixtures.burst_two_workers(queue)
    time.sleep(1)
    jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 3)]
    self.assertEqual(queue.count, 0)
    self.assertTrue(all(job.is_finished for job in [job_slow_1, job_slow_2, job_A, job_B]))
    self.assertEqual(jobs_completed, ["slow_1:w1", "B:w1", "slow_2:w2", "A"])
def test_1(self):
    """Shelter queue dequeues FIFO per species and overall."""
    shelter = AnimalQueue()
    arrivals = (AnimalEnum.DOG, AnimalEnum.DOG, AnimalEnum.CAT,
                AnimalEnum.CAT, AnimalEnum.CAT)
    for kind in arrivals:
        shelter.enqueue(Animal(kind))
    self.assertEqual(shelter.dequeue_cat().enum, AnimalEnum.CAT)
    # dequeue_any returns the oldest animal regardless of species.
    self.assertEqual(shelter.dequeue_any().enum, AnimalEnum.DOG)
    self.assertEqual(shelter.dequeue_dog().enum, AnimalEnum.DOG)
    self.assertEqual(shelter.dequeue_any().enum, AnimalEnum.CAT)
    self.assertEqual(shelter.dequeue_any().enum, AnimalEnum.CAT)
    # Empty shelter yields None rather than raising.
    self.assertIsNone(shelter.dequeue_any())
def test_create_and_cancel_job(self):
    """cancel_job() removes the job from its queue."""
    q = Queue(connection=self.testconn)
    job = q.enqueue(fixtures.say_hello)
    self.assertEqual(len(q.get_jobs()), 1)
    cancel_job(job.id)
    self.assertEqual(len(q.get_jobs()), 0)
def test_create_job_from_static_method(self):
    """A job can wrap a class's static method and still render a call string."""
    q = Queue(connection=self.testconn)
    job = q.enqueue(fixtures.ClassWithAStaticMethod.static_method)
    call_string = job.get_call_string()
    self.assertIsNotNone(call_string)
    job.perform()
def test_create_and_cancel_job_with_serializer(self):
    """cancel_job() honours a non-default serializer when removing the job."""
    q = Queue(connection=self.testconn, serializer=JSONSerializer)
    job = q.enqueue(fixtures.say_hello)
    self.assertEqual(len(q.get_jobs()), 1)
    # The same serializer must be passed so the job can be loaded back.
    cancel_job(job.id, serializer=JSONSerializer)
    self.assertEqual(len(q.get_jobs()), 0)
def test_create_job_with_id(self):
    """enqueue() honours a caller-supplied string id and rejects non-strings."""
    q = Queue(connection=self.testconn)
    job = q.enqueue(fixtures.say_hello, job_id="1234")
    self.assertEqual("1234", job.id)
    job.perform()
    # A non-string job_id is a type error, not a silent coercion.
    with self.assertRaises(TypeError):
        q.enqueue(fixtures.say_hello, job_id=1234)
def test_get_call_string_unicode(self):
    """Call strings render when keyword arguments contain non-ASCII objects."""
    q = Queue(connection=self.testconn)
    job = q.enqueue(fixtures.echo, arg_with_unicode=fixtures.UnicodeStringObject())
    rendered = job.get_call_string()
    self.assertIsNotNone(rendered)
    job.perform()
def test_1(self):
    """Items come out of MyQueue in insertion order."""
    fifo = MyQueue()
    values = (1, 2, 3, 4)
    for value in values:
        fifo.enqueue(value)
    for value in values:
        self.assertEqual(fifo.dequeue(), value)
def test_dependent_job_creates_dependencies_key(self):
    """register_dependency() persists the job's dependencies key in Redis."""
    q = Queue(connection=self.testconn)
    parent = q.enqueue(fixtures.say_hello)
    child = Job.create(func=fixtures.say_hello, depends_on=parent)
    child.register_dependency()
    child.save()
    self.assertTrue(self.testconn.exists(child.dependencies_key))
def test_create_and_cancel_job_enqueue_dependents(self):
    """Cancelling with enqueue_dependents=True promotes deferred dependents."""
    q = Queue(connection=self.testconn)
    parent = q.enqueue(fixtures.say_hello)
    child = q.enqueue(fixtures.say_hello, depends_on=parent)
    # Parent is queued; child waits in the deferred registry.
    self.assertEqual(len(q.get_jobs()), 1)
    self.assertEqual(len(q.deferred_job_registry), 1)
    cancel_job(parent.id, enqueue_dependents=True)
    # Child replaced the parent on the queue; nothing left deferred.
    self.assertEqual(len(q.get_jobs()), 1)
    self.assertEqual(len(q.deferred_job_registry), 0)
    canceled = CanceledJobRegistry(connection=self.testconn, queue=q)
    self.assertIn(parent, canceled)
    self.assertEqual(parent.get_status(), JobStatus.CANCELED)
    self.assertIn(child, q.get_jobs())
    self.assertEqual(child.get_status(), JobStatus.QUEUED)
    # Deleting the job must also purge it from the CanceledJobRegistry.
    parent.delete()
    self.assertNotIn(parent, canceled)
def scheduling(listTask, time):
    """Simulate round-robin CPU scheduling with quantum *time*.

    listTask: dict mapping a process name to a mutable sequence where
    index 1 holds the remaining burst time and index 2 receives the
    completion time. Returns the same (mutated) listTask.

    NOTE: the parameter name ``time`` shadows the stdlib module of the
    same name; console output below is in Indonesian ("antrian" = queue,
    "proses" = process, "iterasi" = iteration).
    """
    print(f"waktu proses cpu = {time}")
    print(f"antrian proses : {listTask.values()}")
    counter = 1
    totalTime = 0  # running clock; doubles as completion time of finished tasks
    listName = qq.createQueue()
    # Seed the ready queue with every process name, in dict insertion order.
    for i in listTask:
        qq.enqueue(listName, i)
    #print(listName)
    #print(listTask)
    #print(' ')
    while not qq.isEmpty(listName):
        print(f"iterasi ke - {counter}")
        counter += 1
        name = qq.dequeue(listName)
        value = listTask[name][1]
        print(
            f"proses {name} sedang dikerjakan, sisa waktu proses {name} = {value}"
        )
        if value > time:
            # Not finished within the quantum: charge one full quantum,
            # record the remaining burst, and requeue at the back.
            totalTime += time
            value = value - time
            listTask[name][1] = value
            qq.enqueue(listName, name)
            print(f"antrian data tersisa : {listName}")
            print(f"sisa task {listTask}")
        elif value <= time:
            # Finishes inside this quantum: record its completion time.
            totalTime += value
            listTask[name][1] = 0
            listTask[name][2] = totalTime
            print(f"proses {name} selesai")
            print(f"antrian data tersisa : {listName}")
            print(f"sisa task {listTask}")
    return listTask
def test_never_expire_during_execution(self):
    """A job's TTL survives saving and performing the job."""
    ttl = 1
    q = Queue(connection=self.testconn)
    job = q.enqueue(fixtures.long_running_job, args=(2,), ttl=ttl)
    self.assertEqual(ttl, job.get_ttl())
    job.save()
    job.perform()
    # TTL unchanged even though execution outlasted it.
    self.assertEqual(ttl, job.get_ttl())
    self.assertTrue(job.exists(job.id))
    self.assertEqual('Done sleeping...', job.result)
def test_fetch_dependencies_returns_dependency_jobs(self):
    """fetch_dependencies() resolves the jobs this job depends on."""
    q = Queue(connection=self.testconn)
    parent = q.enqueue(fixtures.say_hello)
    child = Job.create(func=fixtures.say_hello, depends_on=parent)
    child.register_dependency()
    child.save()
    fetched = child.fetch_dependencies(pipeline=self.testconn)
    self.assertListEqual([parent], fetched)
def test_fetch_dependencies_raises_if_dependency_deleted(self):
    """fetch_dependencies() raises NoSuchJobError once a dependency is gone."""
    q = Queue(connection=self.testconn)
    parent = q.enqueue(fixtures.say_hello)
    child = Job.create(func=fixtures.say_hello, depends_on=parent)
    child.register_dependency()
    child.save()
    parent.delete()
    with self.assertRaises(NoSuchJobError):
        child.fetch_dependencies(pipeline=self.testconn)
def test_execution_order_with_sole_dependency(self):
    """Two workers: a job depending on the slow job runs only after it finishes."""
    queue = Queue(connection=self.testconn)
    key = 'test_job:job_order'
    # When there are no dependencies, the two fast jobs ("A" and "B") run in the order enqueued.
    # Worker 1 will be busy with the slow job, so worker 2 will complete both fast jobs.
    job_slow = queue.enqueue(fixtures.rpush, args=[key, "slow", True, 0.5], job_id='slow_job')
    job_A = queue.enqueue(fixtures.rpush, args=[key, "A", True])
    job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True])
    fixtures.burst_two_workers(queue)
    # Wait out the 0.5 s slow job plus a margin before inspecting results.
    time.sleep(0.75)
    jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 2)]
    self.assertEqual(queue.count, 0)
    self.assertTrue(all(job.is_finished for job in [job_slow, job_A, job_B]))
    self.assertEqual(jobs_completed, ["A:w2", "B:w2", "slow:w1"])
    self.testconn.delete(key)
    # When job "A" depends on the slow job, then job "B" finishes before "A".
    # There is no clear requirement on which worker should take job "A", so we stay silent on that.
    job_slow = queue.enqueue(fixtures.rpush, args=[key, "slow", True, 0.5], job_id='slow_job')
    job_A = queue.enqueue(fixtures.rpush, args=[key, "A", False], depends_on='slow_job')
    job_B = queue.enqueue(fixtures.rpush, args=[key, "B", True])
    fixtures.burst_two_workers(queue)
    time.sleep(0.75)
    jobs_completed = [v.decode() for v in self.testconn.lrange(key, 0, 2)]
    self.assertEqual(queue.count, 0)
    self.assertTrue(all(job.is_finished for job in [job_slow, job_A, job_B]))
    self.assertEqual(jobs_completed, ["B:w2", "slow:w1", "A"])
def bfs(maze, queue):
    """BFS over a square grid maze; True iff the goal is reachable from maze[0][0].

    Sets each visited cell's ``parent`` so a path can be reconstructed.
    NOTE(review): the goal is hard-coded to (9, 9), implying a 10x10 maze --
    confirm with callers before generalizing.
    """
    length = len(maze)
    start = maze[0][0]
    goal = (9, 9)
    # Fix: mark cells as searched when they are ENQUEUED, not when dequeued.
    # The original marked on dequeue, so a cell could be enqueued several
    # times (re-doing work and clobbering its parent pointer).
    start.searched = True
    queue.enqueue(start)
    while not queue.isEmpty():
        node = queue.dequeue()
        # Explore the four orthogonal neighbours.
        for x, y in ((node.x + 1, node.y), (node.x - 1, node.y),
                     (node.x, node.y - 1), (node.x, node.y + 1)):
            if (0 <= x < length and 0 <= y < length
                    and maze[x][y].wall == False
                    and maze[x][y].searched == False):
                if (x, y) == goal:
                    return True
                maze[x][y].parent = node
                maze[x][y].searched = True
                queue.enqueue(maze[x][y])
    return False
def queue_urls(url, soup, queue, limiting_domain):
    '''
    Forms a queue of all the urls found on a page.

    Inputs:
        url: the url of the page being scraped (used to resolve relative links)
        soup: BeautifulSoup object for that page
        queue: the existing queue (must expose enqueue() and all_items)
        limiting_domain: a domain within which to stay when queuing

    Outputs:
        None
    '''
    for link in soup.find_all('a'):
        # Resolve relative hrefs against the page url, dropping any #fragment.
        clean_url = util.convert_if_relative_url(
            url, util.remove_fragment(link.get('href')))
        # NOTE(review): str(clean_url)[0] != 'b' looks like a guard against
        # bytes objects, whose str() repr begins with b'...' -- confirm intent.
        if util.is_absolute_url(clean_url) and str(clean_url)[0] != 'b':
            # Enqueue only followable urls not already seen.
            if (util.is_url_ok_to_follow(
                    clean_url, limiting_domain)) and clean_url not in queue.all_items:
                queue.enqueue(clean_url)
def h_enqueue(queue, Q, A, h): ''' max_manhattan_remove Griffin A. Tucker Febraury 6 2018 This function takes a set of states (Q) and enqueues them to a given queue based on a supploed heuristic (h) with respect to a given accept state (A) Accepts: queue : The queue to enqueue a set of states to Q : A set of states to be enqueued to the queue A : An accepting state with which to use the heuristic with h : A heuristic function to base the enqueue on Returns: The queue with all states enqueued ''' # Check for valid parameters. If we fail, return the unmodified queue if queue is None or Q is None or A is None or h is None: return queue # Create and fill a list of heuristic values # Copy the states (we do not want to modify the original set) h_vals = [] Q_copy = [] for state_idx in range(0, len(Q)): new_q = Q[state_idx] Q_copy.append(new_q) h_vals.append(h(new_q.x, A[0][0], new_q.y, A[1][0])) # Enqueue onto the queue each state q in Q (sorry) based on sorted # h values of the states. while len(Q_copy) > 0: best_h = max(h_vals) best_q = Q_copy[h_vals.index(best_h)] queue.enqueue(best_q) Q_copy.remove(best_q) h_vals.remove(best_h) # Return the final queue return queue
def bfs(self, s):  # breadth first search
    """CLRS-style breadth-first search from source vertex *s*.

    Fills self.color (WHITE/GRAY/BLACK visitation state), self.d (distance
    from s, -1 standing in for infinity) and self.pi (BFS-tree predecessor),
    then prints every vertex with its distance and predecessor.
    """
    print("From bfs(self, s):")
    assert (s in self.vertex_names)
    s_ind = self.vertex_names.index(s)
    self.color = ["NIL"] * len(self.vertex_names)
    self.d = [-1] * len(self.vertex_names)
    self.pi = ["NIL"] * len(self.vertex_names)
    for i, u in enumerate(self.vertex_names):
        if (u != s):
            self.color[i] = "WHITE"
            self.d[i] = -1  # instead of inf
            self.pi[i] = "NIL"
    self.color[s_ind] = "GRAY"
    self.d[s_ind] = 0
    self.pi[s_ind] = "NIL"
    Q = []
    enqueue(Q, s)
    while (len(Q) != 0):
        u = dequeue(Q)
        u_ind = self.vertex_names.index(u)
        for v in self.vertices[u_ind].adj:
            v_ind = self.vertex_names.index(v)
            if (self.color[v_ind] == "WHITE"):
                # Fix: was "GREY", inconsistent with the "GRAY" spelling
                # used for the source vertex above.
                self.color[v_ind] = "GRAY"
                self.d[v_ind] = self.d[u_ind] + 1
                self.pi[v_ind] = u
                enqueue(Q, v)
        self.color[u_ind] = "BLACK"
    for i, v in enumerate(self.vertex_names):
        print("v: " + str(v) + " d: " + str(self.d[i]) + " p: " + str(self.pi[i]))
    print("")
def enqueue_download(filename, runtime_id, package_id, context=None):
    """Queue a test-download task built from the given identifiers."""
    payload = dict(
        filename=filename,
        runtime_id=runtime_id,
        package_id=package_id,
        context=context,
    )
    queue.enqueue(tests_queue, payload)