Ejemplo n.º 1
0
    def test_submit_test_event(self):
        """ Testing POST /EventService/SubmitTestEvent  """
        global task
        # First attempt: assume rackhd and the test stack share localhost.
        httpd = Httpd(port=int(self.__httpd_port),
                      handler_class=self.EventServiceHandler)
        task = WorkerThread(httpd, 'httpd')
        runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
        runner.run()
        redfish().test_event(body={})
        runner.wait_for_completion(timeout_sec=60)
        if task.timeout:
            # Fall back: port forward rackhd -> localhost instead.
            httpd = Httpd(port=int(HTTPD_PORT),
                          handler_class=self.EventServiceHandler)
            task = WorkerThread(httpd, 'httpd')
            runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
            runner.run()

            # forward port for services running on a guest host
            session = open_ssh_forward(self.__httpd_port)

            redfish().test_event(body={})
            runner.wait_for_completion(timeout_sec=60)
            session.logout()
        assert_false(task.timeout,
                     message='timeout waiting for task {0}'.format(task.id))
Ejemplo n.º 2
0
def rack_listener(nb):
    """Spawn a background worker that polls *nb* for newly appearing racks.

    For every rack service not seen on the previous poll, the worker fetches
    the rack's node list, registers each node via add_rack_node, and attaches
    an AMQP device listener for that rack.
    """
    def start(data, id):
        last_rack_set = {}
        # Poll until the owning task is stopped (truthiness, not `== True`).
        while worker_info[id]['task'].running:
            current_rack_set = nb.rack_info()
            # Racks present now but not on the previous iteration.
            # Builtin set() replaces the deprecated `sets.Set`.
            new_rack_set = set(current_rack_set.keys()) - set(
                last_rack_set.keys())
            if len(new_rack_set) > 0:
                for rack_srv in new_rack_set:
                    rack = current_rack_set[rack_srv]['service']
                    r = requests.get(rack.location + 'nodes/')
                    for node in r.json():
                        try:
                            add_rack_node(nb, node, rack)
                        except Exception as e:
                            # Best effort: log and keep processing other
                            # nodes. str(e) is portable; e.message does not
                            # exist on Python 3.
                            log.error('Rack {0} Exception: {1}'.format(
                                rack_srv, e))
                    device_listener(nb, rack)
                last_rack_set = copy.deepcopy(current_rack_set)
            time.sleep(2)

    task = WorkerThread(None, 'rack_listener')
    worker = WorkerTasks(tasks=[task], func=start)
    add_worker(worker, task, 'rack_listener')
    worker.run()
Ejemplo n.º 3
0
    def post_workflows(self, graph_name,
                       timeout_sec=300, nodes=None, data=None,
                       tasks=None, callback=None, run_now=True):
        """Post workflow *graph_name* on every compute node and listen for it.

        :param graph_name: injectable name of the graph to run
        :param timeout_sec: max seconds to wait when run_now is True
        :param nodes: node dicts to target; fetched from the API when empty
        :param data: workflow body posted with the graph
        :param tasks: optional caller-owned list; started threads are appended
        :param callback: AMQP graph-finish callback (defaults to
            handle_graph_finish)
        :param run_now: when True, run and wait on the accumulated tasks
        """
        # None defaults avoid mutable default arguments shared across calls.
        nodes = [] if nodes is None else nodes
        data = {} if data is None else data
        tasks = [] if tasks is None else tasks
        self.__graph_name = graph_name
        self.__graph_status = []
        if len(nodes) == 0:
            Api().nodes_get_all()
            nodes = loads(self.__client.last_response.data)

        if callback is None:
            callback = self.handle_graph_finish

        for n in nodes:
            if n.get('type') == 'compute':
                node_id = n.get('id')  # renamed: don't shadow builtin `id`
                assert_not_equal(node_id, None)
                LOG.info('starting amqp listener for node {0}'.format(node_id))
                worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                                    callbacks=[callback])
                thread = WorkerThread(worker, node_id)
                self.__tasks.append(thread)
                tasks.append(thread)
                try:
                    # Cancel any active workflow so the new one can start.
                    Api().nodes_workflow_action_by_id(node_id,
                                                      {'command': 'cancel'})
                except ApiException as e:
                    assert_equal(404, e.status, message='status should be 404')
                except (TypeError, ValueError) as e:
                    # e.message does not exist on Python 3; str(e) is portable.
                    assert (str(e))
                Api().nodes_post_workflow_by_id(node_id,
                                                name=self.__graph_name,
                                                body=data)

        if run_now:
            self.run_workflow_tasks(self.__tasks, timeout_sec)
Ejemplo n.º 4
0
    def post_workflows(self,
                       graph_name,
                       timeout_sec=10,
                       nodes=None,
                       data=None,
                       tasks=None,
                       callback=None,
                       run_now=True):
        """Post workflow *graph_name* on every compute node and listen for it.

        :param graph_name: injectable name of the graph to run
        :param timeout_sec: max seconds to wait when run_now is True
        :param nodes: node dicts to target; fetched from the API when empty
        :param data: workflow body posted with the graph
        :param tasks: optional caller-owned list; started threads are appended
        :param callback: AMQP graph-finish callback (defaults to
            handle_graph_finish)
        :param run_now: when True, run and wait on the accumulated tasks
        """
        self.__class__.__graph_name = graph_name
        self.__class__.__graph_status = []

        # clean up the defaults: None sentinels avoid mutable default
        # arguments being shared across calls (nodes=[] had the same bug)
        tasks = tasks if tasks else []
        data = data if data else {}
        nodes = nodes if nodes else []

        if len(nodes) == 0:
            Api().nodes_get_all()
            nodes = loads(self.__client.last_response.data)

        if callback is None:
            logs.info("handle graph finish")
            callback = self.handle_graph_finish

        for n in nodes:
            if n.get('type') == 'compute':
                logs.debug("node is compute")
                node_id = n.get('id')  # renamed: don't shadow builtin `id`
                self.assertIsNotNone(node_id)
                logs.debug(' Starting amqp listener for node %s', node_id)
                worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                                    callbacks=[callback])
                thread = WorkerThread(worker, node_id)
                self.__class__.__tasks.append(thread)
                tasks.append(thread)
                try:
                    # Cancel any active workflow so the new one can start.
                    Api().nodes_workflow_action_by_id(node_id,
                                                      {'command': 'cancel'})
                except ApiException as e:
                    self.assertEqual(
                        404,
                        e.status,
                        msg='Expected 404 status, received {}'.format(
                            e.status))
                except (TypeError, ValueError) as e:
                    # e.message does not exist on Python 3; str(e) is portable.
                    assert (str(e))
                Api().nodes_post_workflow_by_id(
                    node_id, name=self.__class__.__graph_name, body=data)
                logs.info("Posted workflow %s on node %s",
                          self.__class__.__graph_name, node_id)

        if run_now:
            logs.info("running workflow tasks....")
            self.run_workflow_tasks(self.__class__.__tasks, timeout_sec)
Ejemplo n.º 5
0
 def _wait_until_graph_finish(self, graph_name, timevalue):
     """Block until the named graph finishes or *timevalue* seconds elapse."""
     self.__graph_name = graph_name
     amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                              callbacks=[self.__handle_graph_finish])
     self.__task = WorkerThread(amqp_worker, graph_name)

     def start(worker, id):
         worker.start()

     runner = WorkerTasks(tasks=[self.__task], func=start)
     runner.run()
     runner.wait_for_completion(timeout_sec=timevalue)
     assert_false(self.__task.timeout,
                  message='timeout waiting for task {0}'.format(self.__task.id))
Ejemplo n.º 6
0
def service_listener(nb):
    """Spawn a background worker that discovers rack services via SSDP.

    Each UPnP API service found by the periodic SSDP discover is registered
    with *nb* as a rack.
    """
    def start(ssdp, id):
        # Poll until the owning task is stopped (truthiness, not `== True`).
        while worker_info[id]['task'].running:
            services = ssdp.discover("urn:schemas-upnp-org:service:api:1.1")
            # Iterating an empty result is a no-op; no length guard needed.
            for srv in services:
                nb.nb_add_rack(srv)
            time.sleep(5)

    # NOTE(review): `ssdp` is not defined in this function; presumably it is
    # a module-level SSDP client passed through as the worker data — confirm
    # it exists at import time.
    task = WorkerThread(ssdp, 'service_listener')
    worker = WorkerTasks(tasks=[task], func=start)
    add_worker(worker, task, 'service_listener')
    worker.run()
Ejemplo n.º 7
0
def run_listener(q, timeout_sec=3):
    """Run an AMQP listener on queue *q* until ctrl-c (SIGINT) or timeout."""
    log = Log(__name__, level='INFO')
    log.info('Run AMQP listener until ctrl-c input\n {0}'.format(q))

    def thread_func(worker, id):
        worker.start()

    def signal_handler(signum, stack):
        # Exit cleanly on ctrl-c instead of raising KeyboardInterrupt.
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    amqp_worker = AMQPWorker(queue=q)
    listener_task = WorkerThread(amqp_worker, 'amqp')
    runner = WorkerTasks(tasks=[listener_task], func=thread_func)
    runner.run()
    runner.wait_for_completion(timeout_sec)
Ejemplo n.º 8
0
 def test_nodes_discovery(self):
     """ API 2.0 Testing Graph.Discovery completion """
     if self.check_compute_count():
         LOG.warning('Nodes already discovered!')
         return
     self.__discovery_duration = datetime.now()
     LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
     amqp_worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                              callbacks=[self.handle_graph_finish])
     self.__task = WorkerThread(amqp_worker, 'discovery')

     def start(worker, id):
         worker.start()

     runner = WorkerTasks(tasks=[self.__task], func=start)
     runner.run()
     runner.wait_for_completion(timeout_sec=1200)
     assert_false(self.__task.timeout,
                  message='timeout waiting for task {0}'.format(self.__task.id))
Ejemplo n.º 9
0
 def check_chassis_task(self):
     """ Testing AMQP on.task.ipmi.chassis.result """
     Nodes().nodes_get()
     nodes = loads(self.__client.last_response.data)
     self.__tasks = []
     for node in nodes:
         node_id = node.get('id')  # renamed: don't shadow builtin `id`
         assert_is_not_none(node_id)
         node_type = node.get('type')  # renamed: don't shadow builtin `type`
         assert_is_not_none(node_type)
         if node_type == 'compute':
             worker = AMQPWorker(queue=QUEUE_CHASSIS_RESULT,
                                 callbacks=[self.__handle_result])
             self.__tasks.append(WorkerThread(worker, node_id))
     tasks = WorkerTasks(tasks=self.__tasks, func=self.__task_thread)
     tasks.run()
     tasks.wait_for_completion()
     # Consistency with check_sdr_task: fail explicitly on any task timeout
     # instead of silently returning after wait_for_completion.
     for task in self.__tasks:
         assert_false(task.timeout,
                      message='timeout waiting for task {0}'.format(
                          task.id))
Ejemplo n.º 10
0
    def post_unbound_workflow(self, graph_name,
                       timeout_sec=300, data=None,
                       tasks=None, callback=None, run_now=True):
        """Post workflow *graph_name* unbound to any node and listen for it.

        :param graph_name: injectable name of the graph to run
        :param timeout_sec: max seconds to wait when run_now is True
        :param data: workflow body posted with the graph
        :param tasks: optional caller-owned list; the started thread is
            appended to it
        :param callback: AMQP graph-finish callback (defaults to
            handle_graph_finish)
        :param run_now: when True, run and wait on the accumulated tasks
        """
        # None defaults avoid mutable default arguments shared across calls.
        data = {} if data is None else data
        tasks = [] if tasks is None else tasks
        self.__graph_name = graph_name
        self.__graph_status = []

        if callback is None:
            callback = self.handle_graph_finish

        LOG.info('Starting AMQP listener for {0}'.format(self.__graph_name))
        worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH, callbacks=[callback])
        thread = WorkerThread(worker, self.__graph_name)
        self.__tasks.append(thread)
        tasks.append(thread)
        Workflows().workflows_post(graph_name, body=data)
        if run_now:
            self.run_workflow_tasks(self.__tasks, timeout_sec)
Ejemplo n.º 11
0
    def post_workflows(self, graph_name,
                       timeout_sec=300, nodes=None, data=None,
                       tasks=None, callback=None, run_now=True):
        """Post workflow *graph_name* on every compute node and listen for it.

        :param graph_name: injectable name of the graph to run
        :param timeout_sec: max seconds to wait when run_now is True
        :param nodes: node ids to target; discovered from the API when empty
        :param data: workflow body posted with the graph
        :param tasks: optional caller-owned list; started threads are appended
        :param callback: AMQP graph-finish callback (defaults to
            handle_graph_finish)
        :param run_now: when True, run and wait on the accumulated tasks
        """
        # None defaults avoid mutable default arguments shared across calls.
        nodes = [] if nodes is None else nodes
        data = {} if data is None else data
        tasks = [] if tasks is None else tasks
        self.__graph_name = graph_name
        self.__graph_status = []

        if len(nodes) == 0:
            Nodes().nodes_get()
            for n in loads(self.__client.last_response.data):
                if n.get('type') == 'compute':
                    nodes.append(n.get('id'))

        if callback is None:
            callback = self.handle_graph_finish

        for node in nodes:
            LOG.info('Starting AMQP listener for node {0}'.format(node))
            worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH, callbacks=[callback])
            thread = WorkerThread(worker, node)
            self.__tasks.append(thread)
            tasks.append(thread)

            try:
                # Cancel any workflow already active on the node.
                Nodes().nodes_identifier_workflows_active_delete(node)
            except ApiException as e:
                assert_equal(HTTP_NOT_FOUND, e.status,
                             message='status should be {0}'.format(HTTP_NOT_FOUND))
            except (TypeError, ValueError) as e:
                # e.message does not exist on Python 3; str(e) is portable.
                assert (str(e))

            # Poll until the active workflow is gone. BUG fix: the status is
            # re-read immediately after each GET, so the loop condition and
            # final assertion never test a stale response (previously the
            # result of the last GET was discarded).
            retries = 5
            Nodes().nodes_identifier_workflows_active_get(node)
            status = self.__client.last_response.status
            while status != HTTP_NO_CONTENT and retries != 0:
                LOG.warning('Workflow status for Node {0} (status={1},retries={2})'
                            .format(node, status, retries))
                time.sleep(1)
                retries -= 1
                Nodes().nodes_identifier_workflows_active_get(node)
                status = self.__client.last_response.status
            assert_equal(HTTP_NO_CONTENT, status,
                         message='status should be {0}'.format(HTTP_NO_CONTENT))
            Nodes().nodes_identifier_workflows_post(node, name=graph_name, body=data)
        if run_now:
            self.run_workflow_tasks(self.__tasks, timeout_sec)
Ejemplo n.º 12
0
    def test_nodes_discovery(self):
        """API 2.0 Testing Graph.Discovery completion."""
        count = defaults.get('RACKHD_NODE_COUNT', '')
        # Skip when the expected node count (if configured) is already
        # reached, or when any compute nodes exist at all.
        if (count.isdigit() and self.check_compute_count() == int(count)) or self.check_compute_count():
            logs.warning('Nodes already discovered!')
            return
        self.__discovery_duration = datetime.now()
        logs.info(' Wait start time: %s', str(self.__discovery_duration))
        self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                                              callbacks=[self.handle_graph_finish]), 'discovery')

        def start(worker, id):
            worker.start()

        tasks = WorkerTasks(tasks=[self.__task], func=start)
        tasks.run()
        tasks.wait_for_completion(timeout_sec=1200)
        # BUG fix: msg was a tuple ('... %s', id) — unittest would print the
        # tuple verbatim, never applying the %-format. Format the id in.
        self.assertFalse(self.__task.timeout,
                         msg='timeout waiting for task {0}'.format(
                             self.__task.id))
Ejemplo n.º 13
0
    def test_submit_test_event(self):
        # """ Testing POST /EventService/SubmitTestEvent  """
        global task
        httpd = Httpd(port=int(HTTPD_PORT),
                      handler_class=self.EventServiceHandler)
        task = WorkerThread(httpd, 'httpd')
        runner = WorkerTasks(tasks=[task], func=self.__httpd_start)
        runner.run()

        # forward port for services running on a guest host
        session = open_ssh_forward(self.__httpd_port)

        redfish().test_event(body={})
        runner.wait_for_completion(timeout_sec=60)
        session.logout()
        self.assertFalse(task.timeout,
                         msg='timeout waiting for task {0}'.format(task.id))
        self.assertFalse(
            self.__class__.eventHandlerFailed,
            msg='Event handler reported subscriptionId / memberId mismatch')
Ejemplo n.º 14
0
 def check_sdr_task(self):
     """ Testing AMQP on.task.ipmi.sdr.result """
     Nodes().nodes_get()
     discovered = loads(self.__client.last_response.data)
     self.__tasks = []
     for node in discovered:
         node_id = node.get('id')
         assert_is_not_none(node_id)
         node_type = node.get('type')
         assert_is_not_none(node_type)
         if node_type == 'compute':
             listener = AMQPWorker(queue=QUEUE_SDR_RESULT,
                                   callbacks=[self.__handle_result])
             self.__tasks.append(WorkerThread(listener, node_id))
     runner = WorkerTasks(tasks=self.__tasks, func=self.__task_thread)
     runner.run()
     runner.wait_for_completion()
     # Any listener that never saw its result is a test failure.
     for task in self.__tasks:
         assert_false(task.timeout,
                      message='timeout waiting for task {0}'.format(
                          task.id))
Ejemplo n.º 15
0
def device_listener(nb, rack_srv):
    """Listen on a rack's AMQP bus for finished SKU-discovery graphs and
    register each successfully discovered node with *nb*."""
    # The AMQP host is the hostname part of the rack service's location URL.
    url_parsed = urlparse(rack_srv.location)
    amqp_url = url_parsed.netloc.split(':')[0]
    id = 'amqp.{0}'.format(amqp_url)

    def start(data, id):
        # Worker entry point: connect to the rack's AMQP bus and block in
        # the consumer loop, dispatching graph-finished events to handler_cb.
        log.info('starting amqp listener @' + id)
        amqp = AMQPWorker(amqp_url=amqp_url,
                          queue=QUEUE_GRAPH_FINISH,
                          callbacks=[handler_cb])
        add_amqp_listener(amqp, id)
        amqp.start()

    def handler_cb(body, message):
        # A graph finished on the rack: scan its workflow list for the
        # SKU-discovery workflow whose graphId matches this message.
        r = requests.get(rack_srv.location + 'workflows')
        workflows = r.json()
        for work in workflows:
            definition = work.get('definition', {})
            injectableName = definition.get('injectableName')
            if injectableName == 'Graph.SKU.Discovery':
                # Routing key has the form 'graph.finished.<graphId>'.
                routeId = message.delivery_info.get('routing_key').split(
                    'graph.finished.')[1]
                graphId = work.get('context', {}).get('graphId')
                if graphId == routeId:
                    status = body.get('status')
                    if status == 'succeeded':
                        # Fetch the discovered node by the id recorded in the
                        # workflow's default options and register it.
                        options = definition.get('options')
                        nodeid = options.get('defaults', {}).get('nodeId')
                        r = requests.get(rack_srv.location + 'nodes/' + nodeid)
                        add_rack_node(nb, r.json(), rack_srv)
                        # Ack only the matched message, then stop scanning.
                        message.ack()
                        break

    task = WorkerThread(rack_srv, id)
    worker = WorkerTasks(tasks=[task], func=start)
    add_worker(worker, task, id)
    worker.run()