def test_order(self):
    """timetuple orders by clock, then timestamp, then id."""
    t1 = time()
    t2 = time() + 300  # windows clock not reliable
    a = timetuple(133, t1, 'A', 'obj')
    b = timetuple(140, t1, 'A', 'obj')
    self.assertTrue(a.__getnewargs__())
    self.assertEqual(a.clock, 133)
    self.assertEqual(a.timestamp, t1)
    self.assertEqual(a.id, 'A')
    self.assertEqual(a.obj, 'obj')
    # assertLessEqual/assertGreaterEqual report both operands on
    # failure, unlike assertTrue(a <= b).
    self.assertLessEqual(a, b)
    self.assertGreaterEqual(b, a)
    # NotImplemented is a singleton: identity check is the correct test.
    self.assertIs(
        timetuple(134, time(), 'A', 'obj').__lt__(()),
        NotImplemented,
    )
    self.assertGreater(
        timetuple(134, t2, 'A', 'obj'),
        timetuple(133, t1, 'A', 'obj'),
    )
    self.assertGreater(
        timetuple(134, t1, 'B', 'obj'),
        timetuple(134, t1, 'A', 'obj'),
    )
    self.assertGreater(
        timetuple(None, t2, 'B', 'obj'),
        timetuple(None, t1, 'A', 'obj'),
    )
def test_order(self):
    """timetuple orders by clock, then timestamp, then id."""
    t1 = time()
    t2 = time() + 300  # windows clock not reliable
    a = timetuple(133, t1, 'A', 'obj')
    b = timetuple(140, t1, 'A', 'obj')
    self.assertTrue(a.__getnewargs__())
    self.assertEqual(a.clock, 133)
    self.assertEqual(a.timestamp, t1)
    self.assertEqual(a.id, 'A')
    self.assertEqual(a.obj, 'obj')
    # assertLessEqual/assertGreaterEqual report both operands on
    # failure, unlike assertTrue(a <= b).
    self.assertLessEqual(a, b)
    self.assertGreaterEqual(b, a)
    # NotImplemented is a singleton: identity check is the correct test.
    self.assertIs(
        timetuple(134, time(), 'A', 'obj').__lt__(()),
        NotImplemented,
    )
    self.assertGreater(
        timetuple(134, t2, 'A', 'obj'),
        timetuple(133, t1, 'A', 'obj'),
    )
    self.assertGreater(
        timetuple(134, t1, 'B', 'obj'),
        timetuple(134, t1, 'A', 'obj'),
    )
    self.assertGreater(
        timetuple(None, t2, 'B', 'obj'),
        timetuple(None, t1, 'A', 'obj'),
    )
def _event(event, timetuple=timetuple, KeyError=KeyError, created=True):
    """Record one worker/task event dict into the monitor state.

    For worker/task events, returns the ``created`` flag (True when a
    new Worker/Task object was instantiated for this event).

    NOTE(review): this is a closure -- ``self``, ``event_callback``,
    ``get_handler``, ``wfields``/``tfields``, ``get_worker``/``get_task``,
    ``workers``, ``tasks``, ``Worker``/``Task``, ``taskheap``,
    ``maxtasks`` and ``add_type`` all come from the enclosing scope,
    which is not visible in this chunk -- confirm against the full file.
    """
    self.event_count += 1
    if event_callback:
        event_callback(self, event)
    # event types look like 'worker-online' / 'task-received'.
    group, _, subject = event["type"].partition("-")
    # a registered group handler (if any) takes over completely.
    try:
        handler = get_handler(group)
    except KeyError:
        pass
    else:
        return handler(subject, event)
    if group == "worker":
        try:
            hostname, timestamp, local_received = wfields(event)
        except KeyError:
            # malformed worker event: silently ignored.
            pass
        else:
            # EAFP: reuse existing worker, create on first sight.
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                worker = workers[hostname] = Worker(hostname)
            worker.event(subject, timestamp, local_received, event)
            return created
    elif group == "task":
        (uuid, hostname, timestamp, local_received, clock) = tfields(event)
        # task-sent event is sent by client, not worker
        is_client_event = subject == "sent"
        try:
            task, created = get_task(uuid), False
        except KeyError:
            task = tasks[uuid] = Task(uuid)
        if is_client_event:
            task.client = hostname
        else:
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                worker = workers[hostname] = Worker(hostname)
            task.worker = worker
            if worker is not None and local_received:
                worker.event(None, local_received, timestamp)
        origin = hostname if is_client_event else worker.id
        # keep a bounded heap of recent task events (weakref to task
        # so the heap does not keep tasks alive).
        heappush(taskheap, timetuple(clock, timestamp, origin, ref(task)))
        if len(taskheap) > maxtasks:
            heappop(taskheap)
        if subject == "received":
            self.task_count += 1
        task.event(subject, timestamp, local_received, event)
        task_name = task.name
        if task_name is not None:
            add_type(task_name)
        return created
def test_order(self):
    """timetuple instances order by clock, then timestamp, then id."""
    now = time()
    later = time() + 300  # windows clock not reliable
    first = timetuple(133, now, 'A', 'obj')
    second = timetuple(140, now, 'A', 'obj')
    assert first.__getnewargs__()
    assert first.clock == 133
    assert first.timestamp == now
    assert first.id == 'A'
    assert first.obj == 'obj'
    assert first <= second
    assert second >= first
    # comparison against a plain tuple is not supported.
    assert timetuple(134, time(), 'A', 'obj').__lt__(tuple()) is NotImplemented
    assert timetuple(134, later, 'A', 'obj') > timetuple(133, now, 'A', 'obj')
    assert timetuple(134, now, 'B', 'obj') > timetuple(134, now, 'A', 'obj')
    assert timetuple(None, later, 'B', 'obj') > timetuple(None, now, 'A', 'obj')
def test_order(self):
    """Ordering and attribute access of timetuple."""
    t1 = time()
    t2 = time() + 300  # windows clock not reliable
    small = timetuple(133, t1, 'A', 'obj')
    large = timetuple(140, t1, 'A', 'obj')
    assert small.__getnewargs__()
    # named-field access mirrors the constructor arguments.
    assert (small.clock, small.timestamp) == (133, t1)
    assert (small.id, small.obj) == ('A', 'obj')
    assert small <= large and large >= small
    # plain tuples are not comparable to timetuple.
    assert timetuple(134, time(), 'A', 'obj').__lt__(tuple()) is NotImplemented
    # clock wins over timestamp; id breaks clock ties; timestamp is
    # used when clock is None.
    assert timetuple(134, t2, 'A', 'obj') > timetuple(133, t1, 'A', 'obj')
    assert timetuple(134, t1, 'B', 'obj') > timetuple(134, t1, 'A', 'obj')
    assert timetuple(None, t2, 'B', 'obj') > timetuple(None, t1, 'A', 'obj')
def task_event(self, type, fields, timetuple=timetuple):
    """Process task event.

    Updates the task (and its worker) from the event fields, records
    the event in the bounded task heap, and dispatches to the task's
    ``on_<type>`` handler when one exists.
    """
    uuid = fields['uuid']
    hostname = fields['hostname']
    worker, _ = self.get_or_create_worker(hostname)
    task, created = self.get_or_create_task(uuid)
    task.worker = worker

    # record the event in the heap, evicting the oldest entry when the
    # configured bound is exceeded.
    taskheap = self._taskheap
    maxtasks = self.max_tasks_in_memory * 2
    timestamp = fields.get('timestamp') or 0
    clock = 0 if type == 'sent' else fields.get('clock')
    heappush(taskheap, timetuple(clock, timestamp, worker.id, task))
    if len(taskheap) > maxtasks:
        heappop(taskheap)

    if type == 'received':
        self.task_count += 1
    handler = getattr(task, 'on_' + type, None)
    if handler:
        handler(**fields)
    else:
        task.on_unknown_event(type, **fields)
    return created
def test_pickleable(self) -> None:
    """A timetuple survives a pickle round-trip as an equal tuple."""
    original = timetuple(133, time(), 'id', 'obj')
    restored = pickle.loads(pickle.dumps(original))
    assert restored == tuple(original)
def test_repr(self):
    """repr() of a timetuple is non-empty even with a Mock payload."""
    entry = timetuple(133, time(), 'id', Mock())
    self.assertTrue(repr(entry))
def rebuild_taskheap(self, timetuple=timetuple):
    """Rebuild the task heap from the current tasks, sorted.

    Bug fix: the original chained assignment
    ``heap = self._taskheap[:] = [...]`` copied the *unsorted* list
    into ``self._taskheap`` and then sorted only the local temporary,
    leaving ``self._taskheap`` unsorted.  Sort first, then slice-assign
    so the stored heap is actually ordered (identity of the
    ``_taskheap`` list object is preserved).
    """
    heap = [
        timetuple(t.clock, t.timestamp, t.origin, ref(t))
        for t in values(self.tasks)
    ]
    heap.sort()
    self._taskheap[:] = heap
def test_repr(self) -> None:
    """repr() of a timetuple wrapping a Mock payload is non-empty."""
    entry = timetuple(133, time(), 'id', Mock())
    assert repr(entry)
def _event(event, timetuple=timetuple, KeyError=KeyError, created=True):
    """Record one event dict; returns ``((obj, created), subject)``.

    NOTE(review): closure over the enclosing scope -- ``self``,
    ``event_callback``, ``get_handler``, ``wfields``/``tfields``,
    ``get_worker``/``get_task``, ``workers``, ``tasks``, ``taskheap``,
    ``max_events_in_heap``, ``on_node_join``/``on_node_leave`` and
    ``add_type`` are not defined here -- confirm against the full file.
    NOTE(review): indentation reconstructed from a flattened source;
    the nesting of the ``if worker:`` guard around the join/leave
    callbacks should be verified against the original file.
    """
    self.event_count += 1
    if event_callback:
        event_callback(self, event)
    # event types look like 'worker-online' / 'task-received'.
    group, _, subject = event['type'].partition('-')
    # a registered group handler (if any) takes over completely.
    try:
        handler = get_handler(group)
    except KeyError:
        pass
    else:
        return handler(subject, event), subject
    if group == 'worker':
        try:
            hostname, timestamp, local_received = wfields(event)
        except KeyError:
            # malformed worker event: silently ignored.
            pass
        else:
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                # an 'offline' event for an unknown worker does not
                # resurrect it; other subjects create the worker.
                if subject == 'offline':
                    worker, created = None, False
                else:
                    worker = workers[hostname] = Worker(hostname)
            if worker:
                worker.event(subject, timestamp, local_received, event)
                if on_node_join and (created or subject == 'online'):
                    on_node_join(worker)
                if on_node_leave and subject == 'offline':
                    on_node_leave(worker)
            return (worker, created), subject
    elif group == 'task':
        (uuid, hostname, timestamp, local_received, clock) = tfields(event)
        # task-sent event is sent by client, not worker
        is_client_event = subject == 'sent'
        try:
            task, created = get_task(uuid), False
        except KeyError:
            task = tasks[uuid] = Task(uuid)
        if is_client_event:
            task.client = hostname
        else:
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                worker = workers[hostname] = Worker(hostname)
            task.worker = worker
            if worker is not None and local_received:
                worker.event(None, local_received, timestamp)
        origin = hostname if is_client_event else worker.id
        # bounded heap of recent task events (weakref to task so the
        # heap does not keep tasks alive).
        heappush(taskheap, timetuple(clock, timestamp, origin, ref(task)))
        if len(taskheap) > max_events_in_heap:
            heappop(taskheap)
        if subject == 'received':
            self.task_count += 1
        task.event(subject, timestamp, local_received, event)
        task_name = task.name
        if task_name is not None:
            add_type(task_name)
        return (task, created), subject
def _event(event, timetuple=timetuple, KeyError=KeyError,
           insort=bisect.insort, created=True):
    """Record one event dict; returns ``((obj, created), subject)``.

    NOTE(review): closure over the enclosing scope -- ``self``,
    ``event_callback``, ``get_handler``, ``wfields``/``tfields``,
    ``get_worker``/``get_task``, ``workers``, ``tasks``, ``taskheap``,
    ``th_pop``/``th_append``, ``max_events_in_heap``,
    ``on_node_join``/``on_node_leave`` and ``add_type`` are not
    defined here -- confirm against the full file.
    """
    self.event_count += 1
    if event_callback:
        event_callback(self, event)
    # event types look like 'worker-online' / 'task-received'.
    group, _, subject = event['type'].partition('-')
    # a registered group handler (if any) takes over completely.
    try:
        handler = get_handler(group)
    except KeyError:
        pass
    else:
        return handler(subject, event), subject
    if group == 'worker':
        try:
            hostname, timestamp, local_received = wfields(event)
        except KeyError:
            # malformed worker event: silently ignored.
            pass
        else:
            is_offline = subject == 'offline'
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                # an 'offline' event for an unknown worker gets a
                # transient Worker that is NOT added to the registry.
                if is_offline:
                    worker, created = Worker(hostname), False
                else:
                    worker = workers[hostname] = Worker(hostname)
            worker.event(subject, timestamp, local_received, event)
            if on_node_join and (created or subject == 'online'):
                on_node_join(worker)
            if on_node_leave and is_offline:
                on_node_leave(worker)
                # forget offline workers entirely.
                workers.pop(hostname, None)
            return (worker, created), subject
    elif group == 'task':
        (uuid, hostname, timestamp, local_received, clock) = tfields(event)
        # task-sent event is sent by client, not worker
        is_client_event = subject == 'sent'
        try:
            task, created = get_task(uuid), False
        except KeyError:
            task = tasks[uuid] = Task(uuid, cluster_state=self)
        if is_client_event:
            task.client = hostname
        else:
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                worker = workers[hostname] = Worker(hostname)
            task.worker = worker
            if worker is not None and local_received:
                worker.event(None, local_received, timestamp)
        origin = hostname if is_client_event else worker.id
        # remove oldest event if exceeding the limit.
        heaps = len(taskheap)
        if heaps + 1 > max_events_in_heap:
            th_pop(0)
        # most events will be dated later than the previous.
        timetup = timetuple(clock, timestamp, origin, ref(task))
        if heaps and timetup > taskheap[-1]:
            # fast path: append keeps the list sorted.
            th_append(timetup)
        else:
            # out-of-order event: insert at the sorted position.
            insort(taskheap, timetup)
        if subject == 'received':
            self.task_count += 1
        task.event(subject, timestamp, local_received, event)
        task_name = task.name
        if task_name is not None:
            add_type(task_name)
        # link into the parent task's children, deferring when the
        # parent has not been seen yet.
        if task.parent_id:
            try:
                parent_task = self.tasks[task.parent_id]
            except KeyError:
                self._add_pending_task_child(task)
            else:
                parent_task.children.add(task)
        # resolve any children that arrived before this task.
        try:
            _children = self._tasks_to_resolve.pop(uuid)
        except KeyError:
            pass
        else:
            task.children.update(_children)
        return (task, created), subject
def test_pickleable(self):
    """Pickling a timetuple round-trips to an equal plain tuple."""
    entry = timetuple(133, time(), 'id', 'obj')
    payload = pickle.dumps(entry)
    assert pickle.loads(payload) == tuple(entry)
def test_repr(self):
    """A timetuple holding a Mock still produces a truthy repr()."""
    sample = timetuple(133, time(), 'id', Mock())
    assert repr(sample)
def _event(event, timetuple=timetuple, KeyError=KeyError,
           insort=bisect.insort, created=True):
    """Record one event dict; returns ``((obj, created), subject)``.

    NOTE(review): closure over the enclosing scope -- ``self``,
    ``event_callback``, ``get_handler``, ``wfields``/``tfields``,
    ``get_worker``/``get_task``, ``workers``, ``tasks``, ``taskheap``,
    ``th_pop``/``th_append``, ``max_events_in_heap``,
    ``on_node_join``/``on_node_leave``, ``add_type`` and
    ``get_task_by_type_set``/``get_task_by_worker_set`` are not
    defined here -- confirm against the full file.
    """
    self.event_count += 1
    if event_callback:
        event_callback(self, event)
    # event types look like 'worker-online' / 'task-received'.
    group, _, subject = event['type'].partition('-')
    # a registered group handler (if any) takes over completely.
    try:
        handler = get_handler(group)
    except KeyError:
        pass
    else:
        return handler(subject, event), subject
    if group == 'worker':
        try:
            hostname, timestamp, local_received = wfields(event)
        except KeyError:
            # malformed worker event: silently ignored.
            pass
        else:
            is_offline = subject == 'offline'
            try:
                worker, created = get_worker(hostname), False
            except KeyError:
                # an 'offline' event for an unknown worker gets a
                # transient Worker that is NOT added to the registry.
                if is_offline:
                    worker, created = Worker(hostname), False
                else:
                    worker = workers[hostname] = Worker(hostname)
            worker.event(subject, timestamp, local_received, event)
            if on_node_join and (created or subject == 'online'):
                on_node_join(worker)
            if on_node_leave and is_offline:
                on_node_leave(worker)
                # forget offline workers entirely.
                workers.pop(hostname, None)
            return (worker, created), subject
    elif group == 'task':
        (uuid, hostname, timestamp, local_received, clock) = tfields(event)
        # task-sent event is sent by client, not worker
        is_client_event = subject == 'sent'
        try:
            task, task_created = get_task(uuid), False
        except KeyError:
            task = tasks[uuid] = Task(uuid, cluster_state=self)
            task_created = True
        if is_client_event:
            task.client = hostname
        else:
            try:
                worker = get_worker(hostname)
            except KeyError:
                worker = workers[hostname] = Worker(hostname)
            task.worker = worker
            if worker is not None and local_received:
                worker.event(None, local_received, timestamp)
        origin = hostname if is_client_event else worker.id
        # remove oldest event if exceeding the limit.
        heaps = len(taskheap)
        if heaps + 1 > max_events_in_heap:
            th_pop(0)
        # most events will be dated later than the previous.
        timetup = timetuple(clock, timestamp, origin, ref(task))
        if heaps and timetup > taskheap[-1]:
            # fast path: append keeps the list sorted.
            th_append(timetup)
        else:
            # out-of-order event: insert at the sorted position.
            insort(taskheap, timetup)
        if subject == 'received':
            self.task_count += 1
        task.event(subject, timestamp, local_received, event)
        task_name = task.name
        if task_name is not None:
            add_type(task_name)
            if task_created:  # add to tasks_by_type index
                get_task_by_type_set(task_name).add(task)
                get_task_by_worker_set(hostname).add(task)
        # link into the parent task's children, deferring when the
        # parent has not been seen yet.
        if task.parent_id:
            try:
                parent_task = self.tasks[task.parent_id]
            except KeyError:
                self._add_pending_task_child(task)
            else:
                parent_task.children.add(task)
        # resolve any children that arrived before this task.
        try:
            _children = self._tasks_to_resolve.pop(uuid)
        except KeyError:
            pass
        else:
            task.children.update(_children)
        return (task, task_created), subject
def test_pickleable(self):
    """A timetuple round-trips through pickle as an equal tuple."""
    entry = timetuple(133, time(), 'id', 'obj')
    roundtripped = pickle.loads(pickle.dumps(entry))
    self.assertEqual(roundtripped, tuple(entry))