def test_submit_test_event(self):
    """ Testing POST /EventService/SubmitTestEvent

    Starts a local HTTP listener, submits a Redfish test event and waits
    for the listener to receive it.  If the direct attempt times out,
    retries once with an SSH port forward (rackhd -> localhost) before
    asserting success.  The duplicated start/submit/wait sequence of the
    original is factored into one helper.
    """
    global task

    def run_attempt(port, forward):
        # Spin up the event receiver, optionally open the ssh forward,
        # submit the test event and wait; leaves the global `task` set so
        # the caller (and the event handler) can inspect task.timeout.
        global task
        server = Httpd(port=int(port),
                       handler_class=self.EventServiceHandler)
        task = WorkerThread(server, 'httpd')
        worker = WorkerTasks(tasks=[task], func=self.__httpd_start)
        worker.run()
        # forward port for services running on a guest host
        session = open_ssh_forward(self.__httpd_port) if forward else None
        redfish().test_event(body={})
        worker.wait_for_completion(timeout_sec=60)
        if session is not None:
            session.logout()

    # Suppose rackhd and test stack have the same localhost
    run_attempt(self.__httpd_port, forward=False)
    if task.timeout:
        # Else port forward rackhd -> localhost and retry once.
        run_attempt(HTTPD_PORT, forward=True)
    assert_false(task.timeout,
                 message='timeout waiting for task {0}'.format(task.id))
def rack_listener(nb):
    """ Poll for racks and attach listeners to newly appeared ones.

    Runs inside a worker thread: every two seconds it diffs the current
    rack set against the previously seen set, registers each node of every
    new rack northbound and starts an AMQP device listener for that rack.

    :param nb: northbound service providing rack_info() and node registration
    """
    def start(data, id):
        last_rack_set = {}
        # Loop until the owning task is told to stop.
        # (fixed: was `running == True`; plain truthiness is equivalent)
        while worker_info[id]['task'].running:
            current_rack_set = nb.rack_info()
            # Racks seen now but not on the previous poll.
            new_rack_set = Set(current_rack_set.keys()) - Set(
                last_rack_set.keys())
            # Iterating an empty set is a no-op, so no emptiness guard needed.
            for rack_srv in new_rack_set:
                rack = current_rack_set[rack_srv]['service']
                r = requests.get(rack.location + 'nodes/')
                for node in r.json():
                    try:
                        add_rack_node(nb, node, rack)
                    except Exception as e:
                        # Best-effort: log and keep going with the other
                        # nodes.  (fixed: e.message is deprecated and does
                        # not exist on Python 3 exceptions)
                        log.error('Rack {0} Exception: {1}'.format(
                            rack_srv, e))
                device_listener(nb, rack)
            last_rack_set = copy.deepcopy(current_rack_set)
            time.sleep(2)

    task = WorkerThread(None, 'rack_listener')
    worker = WorkerTasks(tasks=[task], func=start)
    add_worker(worker, task, 'rack_listener')
    worker.run()
def test_submit_test_event(self):
    """ Testing POST /EventService/SubmitTestEvent """
    global task
    # First attempt: assume rackhd and the test stack share localhost.
    httpd = Httpd(port=int(self.__httpd_port),
                  handler_class=self.EventServiceHandler)
    task = WorkerThread(httpd, 'httpd')
    runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
    runner.run()
    redfish().test_event(body={})
    runner.wait_for_completion(timeout_sec=60)
    if task.timeout:
        # Direct delivery failed: port forward rackhd -> localhost and retry.
        httpd = Httpd(port=int(HTTPD_PORT),
                      handler_class=self.EventServiceHandler)
        task = WorkerThread(httpd, 'httpd')
        runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
        runner.run()
        # forward port for services running on a guest host
        session = open_ssh_forward(self.__httpd_port)
        redfish().test_event(body={})
        runner.wait_for_completion(timeout_sec=60)
        session.logout()
    self.assertFalse(task.timeout,
                     msg='timeout waiting for task {0}'.format(task.id))
    self.assertFalse(self.__class__.eventHandlerFailed,
                     msg='Event handler reported subscriptionId / memberId mismatch')
def run_workflow_tasks(self, tasks, timeout_sec):
    """ Run the queued workflow tasks and fail on any timeout.

    NOTE(review): the ``tasks`` parameter is ignored -- both the worker
    pool and the timeout check use ``self.__tasks``.  Confirm whether
    callers rely on that before changing it.

    :param tasks: unused (see note above)
    :param timeout_sec: seconds to wait for all tasks to complete
    """
    def thread_func(worker, id):
        worker.start()
    worker_tasks = WorkerTasks(tasks=self.__tasks, func=thread_func)
    worker_tasks.run()
    worker_tasks.wait_for_completion(timeout_sec=timeout_sec)
    for task in self.__tasks:
        # Any task that timed out fails the whole workflow run.
        assert_false(task.timeout, \
            message='Timeout for {0}, node {1}'.format(self.__graph_name, task.id))
    if 'failed' in self.__graph_status:
        fail('Failure running {0}'.format(self.__graph_name))
def _wait_until_graph_finish(self, graph_name, timevalue):
    """ Block until the named graph reports completion on AMQP, or time out. """
    self.__graph_name = graph_name
    amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                             callbacks=[self.__handle_graph_finish])
    self.__task = WorkerThread(amqp_worker, graph_name)

    def start(worker, id):
        worker.start()

    runner = WorkerTasks(tasks=[self.__task], func=start)
    runner.run()
    runner.wait_for_completion(timeout_sec=timevalue)
    assert_false(self.__task.timeout,
                 message='timeout waiting for task {0}'.format(self.__task.id))
def run_workflow_tasks(self, tasks, timeout_sec):
    """ Run the queued workflow tasks and record any timeouts as failures.

    NOTE(review): the worker pool is built from ``self.__tasks`` but the
    timeout check below iterates the ``tasks`` parameter -- confirm these
    are intended to be the same collection.

    :param tasks: tasks whose ``timeout`` flags are checked after the run
    :param timeout_sec: seconds to wait for all tasks to complete
    """
    def thread_func(worker, id):
        worker.start()
    worker_tasks = WorkerTasks(tasks=self.__tasks, func=thread_func)
    worker_tasks.run()
    worker_tasks.wait_for_completion(timeout_sec=timeout_sec)
    for task in tasks:
        if task.timeout:
            # A timed-out task marks the whole graph run as failed.
            LOG.error('Timeout for {0}, node {1}'.format(self.__graph_name, task.id))
            self.__graph_status.append('failed')
    if 'failed' in self.__graph_status:
        fail('Failure running {0}'.format(self.__graph_name))
def test_nodes_discovery(self):
    """ API 2.0 Testing Graph.Discovery completion """
    self.__discovery_duration = datetime.now()
    LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
    amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                             callbacks=[self.handle_graph_finish])
    self.__task = WorkerThread(amqp_worker, 'discovery')

    def start(worker, id):
        worker.start()

    runner = WorkerTasks(tasks=[self.__task], func=start)
    runner.run()
    runner.wait_for_completion(timeout_sec=1200)
    assert_false(self.__task.timeout,
                 message='timeout waiting for task {0}'.format(self.__task.id))
def test_discovery(self):
    """ Wait for discovery finished """
    self.case_recorder.write_event('start all discovery')
    amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                             callbacks=[self.handle_discovery_finish])
    self.__task = WorkerThread(amqp_worker, 'benchmark discovery')

    def start(worker, id):
        worker.start()

    runner = WorkerTasks(tasks=[self.__task], func=start)
    runner.run()
    runner.wait_for_completion(timeout_sec=1200)
    assert_false(self.__task.timeout,
                 message='timeout waiting for task {0}'.format(self.__task.id))
def service_listener(nb):
    """ Discover SSDP-advertised RackHD API services and register them as racks.

    Runs inside a worker thread: every five seconds it issues an SSDP
    discovery for the RackHD API service URN and hands each responder to
    the northbound service.

    :param nb: northbound service providing nb_add_rack()
    """
    def start(ssdp, id):
        # Loop until the owning task is told to stop.
        # (fixed: was `running == True`; plain truthiness is equivalent)
        while worker_info[id]['task'].running:
            services = ssdp.discover("urn:schemas-upnp-org:service:api:1.1")
            # Iterating an empty result is a no-op, so no len() guard needed.
            for srv in services:
                nb.nb_add_rack(srv)
            time.sleep(5)

    task = WorkerThread(ssdp, 'service_listener')
    worker = WorkerTasks(tasks=[task], func=start)
    add_worker(worker, task, 'service_listener')
    worker.run()
def run_workflow_tasks(self, tasks, timeout_sec):
    """ Run workflow tasks and record any timeouts as failures.

    :param tasks: tasks to run; falls back to ``self.__tasks`` when None
    :param timeout_sec: seconds to wait for all tasks to complete
    """
    def thread_func(worker, id):
        worker.start()
    # Fall back to the queued tasks when the caller did not supply any.
    tasks = self.__tasks if tasks is None else tasks
    # BUG FIX: the normalized list must also be the one that runs --
    # previously self.__tasks was always passed to WorkerTasks, so
    # caller-supplied tasks were checked for timeouts but never started.
    worker_tasks = WorkerTasks(tasks=tasks, func=thread_func)
    worker_tasks.run()
    worker_tasks.wait_for_completion(timeout_sec=timeout_sec)
    for task in tasks:
        if task.timeout:
            # A timed-out task marks the whole graph run as failed.
            LOG.error('Timeout for {0}, node {1}'.format(self.__graph_name, task.id))
            self.__graph_status.append('failed')
    if 'failed' in self.__graph_status:
        fail('Failure running {0}'.format(self.__graph_name))
def run_listener(q):
    """ Run an AMQP listener on the given queue until ctrl-c. """
    log = Log(__name__, level='INFO')
    log.info('Run AMQP listener until ctrl-c input\n {0}'.format(q))

    def thread_func(worker, id):
        worker.start()

    def signal_handler(signum, stack):
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    amqp = AMQPWorker(queue=q)
    listener_task = WorkerThread(amqp, 'amqp')
    runner = WorkerTasks(tasks=[listener_task], func=thread_func)
    runner.run()
    # timeout_sec=-1: presumably waits indefinitely (until SIGINT) -- confirm.
    runner.wait_for_completion(timeout_sec=-1)
def test_submit_test_event(self):
    """ Testing POST /EventService/SubmitTestEvent """
    global task
    httpd = Httpd(port=int(HTTPD_PORT),
                  handler_class=self.EventServiceHandler)
    task = WorkerThread(httpd, 'httpd')
    runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
    runner.run()
    # forward port for services running on a guest host
    session = open_ssh_forward(self.__httpd_port)
    redfish().test_event(body={})
    runner.wait_for_completion(timeout_sec=60)
    session.logout()
    assert_false(task.timeout,
                 message='timeout waiting for task {0}'.format(task.id))
def run_listener(q, timeout_sec=3):
    """ Run an AMQP listener on the given queue until ctrl-c or timeout. """
    log = Log(__name__, level='INFO')
    log.info('Run AMQP listener until ctrl-c input\n {0}'.format(q))

    def thread_func(worker, id):
        worker.start()

    def signal_handler(signum, stack):
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    listener_task = WorkerThread(AMQPWorker(queue=q), 'amqp')
    runner = WorkerTasks(tasks=[listener_task], func=thread_func)
    runner.run()
    runner.wait_for_completion(timeout_sec)
def test_nodes_discovery(self):
    """ API 2.0 Testing Graph.Discovery completion """
    if self.check_compute_count():
        LOG.warning('Nodes already discovered!')
        return
    self.__discovery_duration = datetime.now()
    LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
    amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                             callbacks=[self.handle_graph_finish])
    self.__task = WorkerThread(amqp_worker, 'discovery')

    def start(worker, id):
        worker.start()

    runner = WorkerTasks(tasks=[self.__task], func=start)
    runner.run()
    runner.wait_for_completion(timeout_sec=1200)
    assert_false(self.__task.timeout,
                 message='timeout waiting for task {0}'.format(self.__task.id))
def check_chassis_task(self):
    """ Testing AMQP on.task.ipmi.chassis.result

    Subscribes one AMQP worker per compute node to the chassis result
    queue, runs them and asserts none of them timed out.
    """
    Nodes().nodes_get()
    nodes = loads(self.__client.last_response.data)
    self.__tasks = []
    for node in nodes:
        # Renamed from `id`/`type` to avoid shadowing the builtins.
        node_id = node.get('id')
        assert_is_not_none(node_id)
        node_type = node.get('type')
        assert_is_not_none(node_type)
        if node_type == 'compute':
            worker = AMQPWorker(queue=QUEUE_CHASSIS_RESULT,
                                callbacks=[self.__handle_result])
            self.__tasks.append(WorkerThread(worker, node_id))
    tasks = WorkerTasks(tasks=self.__tasks, func=self.__task_thread)
    tasks.run()
    tasks.wait_for_completion()
    # ADDED for consistency with the sibling sdr checks: a task that never
    # received its chassis result must fail the test, not pass silently.
    for task in self.__tasks:
        assert_false(task.timeout,
                     message='timeout waiting for task {0}'.format(task.id))
def test_nodes_discovery(self):
    """ API 2.0 Testing Graph.Discovery completion """
    count = defaults.get('RACKHD_NODE_COUNT', '')
    # Hoisted: the original could call check_compute_count() twice (once in
    # each clause), doubling the round trip.  Assumes the call is idempotent
    # within this test -- TODO confirm.
    discovered = self.check_compute_count()
    if (count.isdigit() and discovered == int(count)) or discovered:
        LOG.warning('Nodes already discovered!')
        return
    self.__discovery_duration = datetime.now()
    LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
    self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                                          callbacks=[self.handle_graph_finish]),
                               'discovery')

    def start(worker, id):
        worker.start()

    tasks = WorkerTasks(tasks=[self.__task], func=start)
    tasks.run()
    tasks.wait_for_completion(timeout_sec=1200)
    assert_false(self.__task.timeout,
                 message='timeout waiting for task {0}'.format(self.__task.id))
def test_nodes_discovery(self):
    """ Testing Graph.Discovery completion """
    if self.check_compute_count():
        LOG.warning("Nodes already discovered!")
        return
    self.__discovery_duration = datetime.now()
    LOG.info("Wait start time: {0}".format(self.__discovery_duration))
    amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                             callbacks=[self.handle_graph_finish])
    self.__task = WorkerThread(amqp_worker, "discovery")

    def start(worker, id):
        worker.start()

    runner = WorkerTasks(tasks=[self.__task], func=start)
    runner.run()
    runner.wait_for_completion(timeout_sec=1200)
    assert_false(self.__task.timeout,
                 message="timeout waiting for task {0}".format(self.__task.id))
def check_sdr_task(self):
    """ Testing AMQP on.task.ipmi.sdr.result """
    Nodes().nodes_get()
    node_list = loads(self.__client.last_response.data)
    self.__tasks = []
    # One SDR-result listener per compute node.
    for node in node_list:
        node_id = node.get('id')
        assert_is_not_none(node_id)
        node_type = node.get('type')
        assert_is_not_none(node_type)
        if node_type == 'compute':
            amqp_worker = AMQPWorker(queue=QUEUE_SDR_RESULT,
                                     callbacks=[self.__handle_result])
            self.__tasks.append(WorkerThread(amqp_worker, node_id))
    runner = WorkerTasks(tasks=self.__tasks, func=self.__task_thread)
    runner.run()
    runner.wait_for_completion()
    for task in self.__tasks:
        assert_false(task.timeout,
                     message='timeout waiting for task {0}'.format(task.id))
def test_nodes_discovery(self):
    # API 2.0 Testing Graph.Discovery completion
    count = defaults.get('RACKHD_NODE_COUNT', '')
    if (count.isdigit() and self.check_compute_count() == int(count)) or self.check_compute_count():
        logs.warning('Nodes already discovered!')
        return
    self.__discovery_duration = datetime.now()
    logs.info(' Wait start time: %s', str(self.__discovery_duration))
    self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                                          callbacks=[self.handle_graph_finish]),
                               'discovery')

    def start(worker, id):
        worker.start()

    tasks = WorkerTasks(tasks=[self.__task], func=start)
    tasks.run()
    tasks.wait_for_completion(timeout_sec=1200)
    # BUG FIX: msg was previously the tuple ('timeout ... %s', id) -- the
    # %-formatting was never applied, so a failure printed the raw tuple.
    # Format the message before handing it to assertFalse.
    self.assertFalse(self.__task.timeout,
                     msg='timeout waiting for task %s' % self.__task.id)
def test_submit_test_event(self):
    """ Testing POST /EventService/SubmitTestEvent """
    global task
    httpd = Httpd(port=int(HTTPD_PORT),
                  handler_class=self.EventServiceHandler)
    task = WorkerThread(httpd, 'httpd')
    runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
    runner.run()
    # forward port for services running on a guest host
    session = open_ssh_forward(self.__httpd_port)
    redfish().test_event(body={})
    runner.wait_for_completion(timeout_sec=60)
    session.logout()
    self.assertFalse(task.timeout,
                     msg='timeout waiting for task {0}'.format(task.id))
    self.assertFalse(self.__class__.eventHandlerFailed,
                     msg='Event handler reported subscriptionId / memberId mismatch')
def check_sdr_task(self):
    """ Testing AMQP on.task.ipmi.sdr.result """
    Nodes().nodes_get()
    payload = loads(self.__client.last_response.data)
    self.__tasks = []
    for entry in payload:
        uid = entry.get('id')
        assert_is_not_none(uid)
        kind = entry.get('type')
        assert_is_not_none(kind)
        if kind != 'compute':
            continue
        # Listen for the SDR result of this compute node.
        listener = AMQPWorker(queue=QUEUE_SDR_RESULT,
                              callbacks=[self.__handle_result])
        self.__tasks.append(WorkerThread(listener, uid))
    runner = WorkerTasks(tasks=self.__tasks, func=self.__task_thread)
    runner.run()
    runner.wait_for_completion()
    for task in self.__tasks:
        assert_false(task.timeout,
                     message='timeout waiting for task {0}'.format(task.id))
def device_listener(nb, rack_srv):
    """ Start an AMQP listener for graph-finished events from one rack service.

    When a Graph.SKU.Discovery workflow finishes with status 'succeeded',
    the node it discovered is fetched from the rack service and registered
    northbound.

    :param nb: northbound service used to register discovered nodes
    :param rack_srv: rack service descriptor; its ``location`` URL hosts the
        workflows/nodes endpoints and (by hostname) the AMQP broker
    """
    url_parsed = urlparse(rack_srv.location)
    # Broker host is the rack service's hostname, port stripped.
    amqp_url = url_parsed.netloc.split(':')[0]
    id = 'amqp.{0}'.format(amqp_url)

    def start(data, id):
        # Worker entry point: attach the AMQP consumer and block in start().
        log.info('starting amqp listener @' + id)
        amqp = AMQPWorker(amqp_url=amqp_url, queue=QUEUE_GRAPH_FINISH,
                          callbacks=[handler_cb])
        add_amqp_listener(amqp, id)
        amqp.start()

    def handler_cb(body, message):
        # Match the finished graph (via the routing key) against the rack's
        # discovery workflows; on success register the discovered node.
        r = requests.get(rack_srv.location + 'workflows')
        workflows = r.json()
        for work in workflows:
            definition = work.get('definition', {})
            injectableName = definition.get('injectableName')
            if injectableName == 'Graph.SKU.Discovery':
                # Routing key has the form 'graph.finished.<graphId>'.
                routeId = message.delivery_info.get('routing_key').split(
                    'graph.finished.')[1]
                graphId = work.get('context', {}).get('graphId')
                if graphId == routeId:
                    status = body.get('status')
                    if status == 'succeeded':
                        options = definition.get('options')
                        nodeid = options.get('defaults', {}).get('nodeId')
                        r = requests.get(rack_srv.location + 'nodes/' + nodeid)
                        add_rack_node(nb, r.json(), rack_srv)
                    # NOTE(review): ack/break placement reconstructed from a
                    # flattened source -- confirm they belong at this level.
                    message.ack()
                    break

    task = WorkerThread(rack_srv, id)
    worker = WorkerTasks(tasks=[task], func=start)
    add_worker(worker, task, id)
    worker.run()
def clear_queue():
    # Drain the graph.finished AMQP queue so stale messages from earlier
    # runs do not satisfy this run's waiters.  Relies on the module-level
    # names amqp_listner_worker, thread_func and time_to_clear_queue.
    task = WorkerThread(amqp_listner_worker, 'amqp')
    tasks = WorkerTasks(tasks=[task], func=thread_func)
    # Python 2 print statement -- this file predates a py3 migration.
    print 'Clearing the graph.finished queue...'
    tasks.run()
    tasks.wait_for_completion(time_to_clear_queue)