def test_node_patch(self):
    """ Verify PATCH:/nodes/:id """
    # Payload that renames the target node; the original name is restored below.
    data = {"name": 'fake_name_test'}
    Nodes().nodes_get()
    nodes = self.__get_data()
    codes = []
    for n in nodes:
        if n.get('name') == 'test_compute_node':
            uuid = n.get('id')
            # Rename the node; the PATCH response body should reflect the new name.
            Nodes().nodes_identifier_patch(uuid, data)
            rsp = self.__client.last_response
            test_nodes = self.__get_data()
            assert_equal(test_nodes.get('name'), 'fake_name_test', 'Oops patch failed')
            codes.append(rsp)
            LOG.info('Restoring name to "test_compute_node"')
            # Patch the name back so later tests can still find 'test_compute_node'.
            correct_data = {"name": 'test_compute_node'}
            Nodes().nodes_identifier_patch(uuid, correct_data)
            rsp = self.__client.last_response
            restored_nodes = self.__get_data()
            assert_equal(restored_nodes.get('name'), 'test_compute_node', 'Oops restoring failed')
            codes.append(rsp)
    # Both the rename and the restore must have happened and returned 200.
    assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
    for c in codes:
        assert_equal(200, c.status, message=c.reason)
    # PATCHing a bogus node id must raise an ApiException.
    assert_raises(rest.ApiException, Nodes().nodes_identifier_patch, 'fooey', data)
def __post_workflows(self, graph_name, body):
    """POST the *graph_name* workflow to every compute node without listening to AMQP for status.

    Any active workflow on a node is deleted first; a 404 on that delete just
    means nothing was active and is acceptable.
    """
    Nodes().nodes_get()
    nodes = loads(self.client.last_response.data)
    for n in nodes:
        if n.get('type') == 'compute':
            id = n.get('id')
            assert_not_equal(id, None)
            try:
                Nodes().nodes_identifier_workflows_active_delete(id)
            except Exception as e:  # FIX: 'except X, e' is Python-2-only syntax; 'as' works on 2.6+ and 3
                assert_equal(404, e.status, message='status should be 404')
            # Verify the active workflow has really been deleted.
            # If the post-workflow API were called immediately after deleting the
            # active workflow, the API could fail once and retry, effectively
            # issuing the workflow twice in a row and confusing vBMC.
            retries = 5
            Nodes().nodes_identifier_workflows_active_get(id)
            status = self.client.last_response.status
            while status != 204 and retries != 0:
                LOG.warning('Workflow status for Node {0} (status={1},retries={2})'.format(id, status, retries))
                sleep(1)
                retries -= 1
                Nodes().nodes_identifier_workflows_active_get(id)
                status = self.client.last_response.status
            assert_equal(204, status, message='status should be 204')
            Nodes().nodes_identifier_workflows_post(id, name=graph_name, body=body)
def _set_ipmi(self, uid):
    """Create ipmi-obm-service settings for node *uid* from its BMC (or RMM) catalog.

    Looks up the BMC MAC address in the 'bmc' catalog, falling back to the
    'rmm' catalog, then PATCHes obmSettings onto the node.
    Returns False on failure; falls through (None) on success.
    """
    user, passwd = get_bmc_cred()
    mac = None
    Nodes().nodes_identifier_catalogs_source_get(uid, 'bmc')
    rsp = self.__client.last_response
    bmc = loads(rsp.data)
    if 'data' in bmc:
        mac = bmc['data'].get('MAC Address')
    else:
        # No BMC catalog -- fall back to the RMM catalog.
        Nodes().nodes_identifier_catalogs_source_get(uid, 'rmm')
        rsp = self.__client.last_response
        rmm = loads(rsp.data)
        if 'data' in rmm:
            # BUG FIX: read the MAC from the rmm catalog; the original read
            # bmc['data'] here, which is known to lack a 'data' key in this branch.
            mac = rmm['data'].get('MAC Address')
    if mac is not None:
        LOG.debug('BMC MAC {0} for {1}'.format(mac, uid))
        setting = {
            'obmSettings': [{
                'service': 'ipmi-obm-service',
                'config': {
                    'user': user,
                    'password': passwd,
                    'host': mac
                }
            }]
        }
        LOG.info('Creating ipmi obm-settings for node {0} \n {1}'.format(uid, setting))
        try:
            Nodes().nodes_identifier_patch(uid, setting)
        except rest.ApiException as e:
            LOG.error(e)
            return False
    else:
        LOG.error('Error finding configurable IPMI MAC address for {0}'.format(uid))
        return False
def test_node_pollers(self):
    """ Test /nodes/:id/pollers are running """
    Nodes().nodes_get()
    nodes = loads(self.__client.last_response.data)
    LOG.debug(nodes, json=True)
    samples = []
    for n in nodes:
        LOG.info(n)
        if n.get('type') == 'compute':
            uuid = n.get('id')
            Nodes().nodes_identifier_pollers_get(uuid)
            rsp = self.__client.last_response
            data = loads(rsp.data)
            assert_equal(200, rsp.status, message=rsp.reason)
            assert_not_equal(0, len(data), \
                message='Failed to find poller for nodes {0}'.format(n.get('id')))
            samples.append(data[0])
    for sample in samples:
        count = 18  # Wait for 3 mins (poller interval is 1 min)
        # BUG FIX: reset 'valid' per sample. Previously it was set once before
        # the loop, so after the first poller succeeded every remaining sample
        # skipped the check entirely.
        valid = False
        while valid == False:
            try:
                Templates().pollers_identifier_data_get(sample.get('id'))
                valid = True
            except Exception:  # FIX: Py2-only 'except X, e' syntax; bound name was unused
                LOG.warning('Poller {0} doesn\'t work normally'.format(
                    sample.get('id')))
                time.sleep(10)
                count -= 1
                # Fail the test outright once the retry budget is exhausted.
                assert_not_equal(0, count, \
                    message='Poller {0} failed to get data'.format(sample.get('id')))
def delete_all_active_workflows(self):
    """Cancel the active workflow on every discovered compute node."""
    Nodes().nodes_get()
    listing = loads(self.__client.last_response.data)
    compute_ids = [entry.get('id') for entry in listing if entry.get('type') == 'compute']
    for node_id in compute_ids:
        assert_not_equal(node_id, None)
        try:
            Nodes().nodes_identifier_workflows_active_delete(node_id)
        except rest.ApiException as api_err:
            # A node without an active workflow raises; log it and move on.
            LOG.warning(api_err)
def test_node_catalogs_bysource(self):
    """ Testing GET id:/catalogs/source """
    Nodes().nodes_get()
    responses = []
    for node in self.__get_data():
        if node.get('type') == 'compute':
            # Fetch the BMC-sourced catalog for each compute node.
            Nodes().nodes_identifier_catalogs_source_get(
                node.get('id'), 'bmc')
            responses.append(self.__client.last_response)
    for response in responses:
        assert_equal(200, response.status, message=response.reason)
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_catalogs_source_get,
                  'fooey', 'bmc')
def test_node_workflows_get(self):
    """Testing node GET:id/workflows"""
    Nodes().nodes_get()
    workflow_lists = []
    for node in self.__get_data():
        if node.get('type') == 'compute':
            Nodes().nodes_identifier_workflows_get(node.get('id'))
            workflow_lists.append(self.__get_data())
    # Every compute node should report at least one workflow.
    for workflows in workflow_lists:
        assert_not_equal(0, len(workflows), message='No Workflows found for Node')
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_workflows_get,
                  'fooey')
def test_node_catalogs(self):
    """ Testing GET id:/catalogs """
    Nodes().nodes_get()
    catalogs = []
    for node in self.__get_data():
        if node.get('type') == 'compute':
            Nodes().nodes_identifier_catalogs_get(node.get('id'))
            catalogs.append(self.__get_data())
    # Every compute node is expected to have a non-empty catalog.
    for catalog in catalogs:
        assert_not_equal(0, len(catalog), message='Node catalog is empty!')
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_catalogs_get,
                  'fooey')
def test_node_workflows_active(self):
    """Testing node GET:id/workflows/active"""
    Nodes().nodes_get()
    raw_bodies = []
    for node in loads(self.__client.last_response.data):
        if node.get('type') == 'compute':
            Nodes().nodes_identifier_workflows_active_get(node.get('id'))
            # Keep the raw response body; it is parsed in the assertion pass.
            raw_bodies.append(self.__client.last_response.data)
    for body in raw_bodies:
        assert_not_equal(0, len(loads(body)),
                         message='No active Workflows found for Node')
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_workflows_active_get,
                  'fooey')
def test_whitelist_node_delete(self):
    """ Verify Delete:/nodes/:mac/dhcp/whitelist """
    Nodes().nodes_get()
    nodes = self.__get_data()
    for n in nodes:
        # Scan each node's keys looking for its 'identifiers' (MAC address) list.
        for i in n:
            if i == 'identifiers':
                if len(n[i]) > 0:
                    macaddress = n[i]
                    # Delete the whitelist entry for the node's last MAC address.
                    macaddress_to_delete = macaddress[len(macaddress) - 1]
                    LOG.info('Deleting macaddress {0}'.format(macaddress_to_delete))
                    Nodes().nodes_macaddress_dhcp_whitelist_delete(macaddress_to_delete)
                    rsp = self.__client.last_response
                    # Whitelist DELETE is expected to return 204 No Content.
                    assert_equal(204, rsp.status, message=rsp.reason)
def redfish_discovery_test(self):
    """ Testing Redfish Service Discovery """
    user, passwd = get_cred('redfish')
    assert_is_not_none(user)
    assert_is_not_none(passwd)
    # Workflow options: default credentials and the Redfish service URI.
    body = {
        'options': {
            'defaults': {
                'username': user,
                'password': passwd,
                'uri': URI
            }
        }
    }
    if IS_EMC:
        # EMC targets additionally auto-create catalogs and pollers.
        body['options']['when-catalog-emc'] = { 'autoCatalogEmc': 'true' }
        body['options']['when-pollers-emc'] = { 'autoCreatePollerEmc': 'true' }
    self.__post_unbound_workflow('Graph.Redfish.Discovery', body)
    Nodes().nodes_get()
    nodes = self.__get_data()
    settings = []
    # Collect every enclosure node that got a redfish-obm-service setting and
    # verify its configured URI matches the one we posted.
    for node in nodes:
        if node.get('type') == 'enclosure':
            for obm in node.get('obmSettings', []):
                if obm.get('service') == 'redfish-obm-service':
                    self.__nodes.append(node)
                    config = obm.get('config')
                    assert_equal(URI, config.get('uri'), \
                        message = "Unexpected Redfish URI")
    assert_not_equal(len(self.__nodes), 0, message='Missing Redfish Enclosures')
def test_node_create(self):
    """ Verify POST:/nodes/ """
    for node in self.__test_nodes:
        LOG.info('Creating node (name={0})'.format(node.get('name')))
        Nodes().nodes_post(node)
        last = self.__client.last_response
        # Node creation should answer 201 Created.
        assert_equal(201, last.status, message=last.reason)
def test_node_workflows_post(self):
    """Testing node POST:id/workflows"""
    Nodes().nodes_get()
    results = []
    for node in self.__get_data():
        if node.get('type') != 'compute':
            continue
        node_id = node.get('id')
        # __post_workflow returns the remaining timeout budget; > 0 means
        # the graph actually reached pending/running.
        remaining = self.__post_workflow(node_id, 'Graph.Discovery', {})
        if remaining > 0:
            results.append({'data': self.__get_data(), 'id': node_id})
    for entry in results:
        assert_not_equal(0, len(entry['data']),
                         message='No Workflows found for Node {0}'.format(entry['id']))
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_workflows_post,
                  'fooey', 'Graph.Discovery', body={})
def __init__(self):
    """Cache the API client and remember every discovered compute node."""
    self.__client = config.api_client
    self.__nodes = []
    Nodes().nodes_get()
    listing = loads(self.__client.last_response.data)
    self.__nodes.extend(entry for entry in listing if entry['type'] == 'compute')
def test_node_id_obm_identify_create(self):
    """ Testing POST:/nodes/:id/obm/identify """
    Nodes().nodes_get()
    listing = loads(self.__client.last_response.data)
    payload = {"value": "true"}
    responses = []
    for node in listing:
        if node.get('type') == 'compute':
            Nodes().nodes_identifier_obm_identify_post(node.get('id'), payload)
            responses.append(self.__client.last_response)
    for response in responses:
        assert_equal(200, response.status, message=response.reason)
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_obm_identify_post,
                  'fooey', payload)
def test_node_workflows_del_active(self):
    """Testing node DELETE:id/workflows/active"""
    Nodes().nodes_get()
    nodes = loads(self.__client.last_response.data)
    for n in nodes:
        if n.get('type') == 'compute':
            id = n.get('id')
            assert_is_not_none(id)
            Nodes().nodes_identifier_workflows_active_delete(id)
            # A successful delete is expected to return an empty response body.
            # NOTE(review): this raises if the node has no active workflow --
            # the 404 case is not handled here; confirm against the suite setup.
            assert_equal(
                0,
                len(self.__client.last_response.data),
                message='No active Workflows found for Node {0}'.format(
                    id))
    # Deleting against a bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
        Nodes().nodes_identifier_workflows_active_delete,
        'fooey')
def __get_workflow_status(self, id):
    """Return the active workflow's '_status' for node *id*, or the raw HTTP status when no workflow document came back."""
    Nodes().nodes_identifier_workflows_active_get(id)
    result = self.__client.last_response.status
    if result == 200:
        # A 200 carries a workflow document; report its '_status' field instead.
        result = self.__get_data().get('_status')
    assert_is_not_none(result)
    return result
def test_node_id(self):
    """ Testing GET:/nodes/:id """
    Nodes().nodes_get()
    listing = self.__get_data()
    LOG.debug(listing, json=True)
    collected = []
    for node in listing:
        LOG.info(node)
        if node.get('type') == 'compute':
            Nodes().nodes_identifier_get(node.get('id'))
            collected.append(self.__client.last_response)
    assert_not_equal(0, len(collected), message='Failed to find compute node Ids')
    for response in collected:
        assert_equal(200, response.status, message=response.reason)
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException, Nodes().nodes_identifier_get, 'fooey')
def post_workflows(self, graph_name,
                   timeout_sec=300, nodes=None, data=None,
                   tasks=None, callback=None, run_now=True):
    """Post *graph_name* to each target node and optionally run the AMQP listener tasks.

    :param graph_name: name of the workflow graph to post.
    :param timeout_sec: budget handed to run_workflow_tasks.
    :param nodes: node ids to target; defaults to all compute nodes.
    :param data: workflow body; defaults to {}.
    :param tasks: list that receives the created WorkerThread objects.
    :param callback: AMQP callback; defaults to self.handle_graph_finish.
    :param run_now: when True, run the queued worker tasks immediately.
    """
    # BUG FIX: the defaults were mutable ([], {}), and 'nodes'/'tasks' are
    # appended to below -- the shared default objects grew across calls,
    # re-targeting stale nodes on every subsequent invocation.
    if nodes is None:
        nodes = []
    if data is None:
        data = {}
    if tasks is None:
        tasks = []
    self.__graph_name = graph_name
    self.__graph_status = []
    if len(nodes) == 0:
        Nodes().nodes_get()
        for n in loads(self.__client.last_response.data):
            if n.get('type') == 'compute':
                nodes.append(n.get('id'))
    if callback is None:
        callback = self.handle_graph_finish
    for node in nodes:
        LOG.info('Starting AMQP listener for node {0}'.format(node))
        worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH, callbacks=[callback])
        thread = WorkerThread(worker, node)
        self.__tasks.append(thread)
        tasks.append(thread)
        try:
            Nodes().nodes_identifier_workflows_active_delete(node)
        except ApiException as e:
            # 404 means there was no active workflow to delete -- acceptable.
            assert_equal(HTTP_NOT_FOUND, e.status, \
                message = 'status should be {0}'.format(HTTP_NOT_FOUND))
        except (TypeError, ValueError) as e:
            assert(e.message)
        retries = 5
        Nodes().nodes_identifier_workflows_active_get(node)
        status = self.__client.last_response.status
        while status != HTTP_NO_CONTENT and retries != 0:
            LOG.warning('Workflow status for Node {0} (status={1},retries={2})' \
                .format(node, status, retries))
            time.sleep(1)
            retries -= 1
            # BUG FIX: refresh 'status' right after the GET; previously the
            # final GET's result was never read before the loop exited, so the
            # assertion below could check a stale status.
            Nodes().nodes_identifier_workflows_active_get(node)
            status = self.__client.last_response.status
        assert_equal(HTTP_NO_CONTENT, status, \
            message = 'status should be {0}'.format(HTTP_NO_CONTENT))
        Nodes().nodes_identifier_workflows_post(node, name=graph_name, body=data)
    if run_now:
        self.run_workflow_tasks(self.__tasks, timeout_sec)
def __check_compute_count(self):
    """Return the number of discovered nodes whose type is 'compute'."""
    Nodes().nodes_get()
    listing = loads(self.client.last_response.data)
    return sum(1 for entry in listing if entry.get('type') == 'compute')
def test_node_id_obm(self):
    """ Testing GET:/nodes/:id/obm """
    Nodes().nodes_get()
    listing = self.__get_data()
    LOG.debug(listing, json=True)
    collected = []
    for node in listing:
        if node.get('name') == 'test_compute_node':
            node_id = node.get('id')
            Nodes().nodes_identifier_obm_get(node_id)
            response = self.__client.last_response
            LOG.info('OBM setting for node ID {0} is {1}'.format(node_id, response.data))
            collected.append(response)
    assert_not_equal(0, len(collected), message='Failed to find compute node Ids')
    for response in collected:
        assert_equal(200, response.status, message=response.reason)
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException, Nodes().nodes_identifier_obm_get, 'fooey')
def callback(body, message):
    # AMQP graph-finish handler: ack the message, then stop the worker task
    # for any enclosure whose active workflow has finished (204 No Content
    # from the active-workflow GET means nothing is running).
    # NOTE(review): 'self' and 'tasks' come from the enclosing scope -- this
    # is a closure defined inside another method; confirm against the caller.
    message.ack()
    for node in self.__get_enclosure_ids():
        Nodes().nodes_identifier_workflows_active_get(node)
        if self.__client.last_response.status == 204:
            for task in tasks:
                if task.id == node:
                    task.worker.stop()
                    task.running = False
def check_compute_count(self):
    """Return how many currently discovered nodes are of type 'compute'."""
    Nodes().nodes_get()
    compute_nodes = [entry for entry in self.__get_data()
                     if entry.get('type') == 'compute']
    return len(compute_nodes)
def test_whitelist_node_delete(self):
    """ Verify Delete:/nodes/:mac/dhcp/whitelist """
    Nodes().nodes_get()
    mac_addresses = []
    for node in self.__get_data():
        node_type = node.get('type')
        assert_is_not_none(node_type)
        if node_type == 'compute':
            identifiers = node.get('identifiers')
            assert_is_not_none(identifiers)
            if len(identifiers) > 0:
                # grab the first mac
                mac_addresses.append(identifiers[0])
    for addr in mac_addresses:
        LOG.info('Deleting macaddress {0}'.format(addr))
        Nodes().nodes_macaddress_dhcp_whitelist_delete(addr)
        response = self.__client.last_response
        # Whitelist DELETE is expected to return 204 No Content.
        assert_equal(204, response.status, message=response.reason)
def __get_enclosure_ids(self):
    """Return the ids of enclosure nodes configured with redfish-obm-service."""
    Nodes().nodes_get()
    enclosure_ids = []
    for entry in self.__get_data():
        if entry.get('type') != 'enclosure':
            continue
        for setting in entry.get('obmSettings', []):
            if setting.get('service') == 'redfish-obm-service':
                enclosure_ids.append(entry.get('id'))
    return enclosure_ids
def test_node_delete(self):
    """ Testing DELETE:/nodes/:id """
    Nodes().nodes_get()
    listing = self.__get_data()
    # Only the nodes this suite created (by name) are deleted.
    known_names = [t.get('name') for t in self.__test_nodes]
    deletions = []
    for node in listing:
        node_name = node.get('name')
        if node_name in known_names:
            node_id = node.get('id')
            LOG.info('Deleting node {0} (name={1})'.format(node_id, node_name))
            Nodes().nodes_identifier_delete(node_id)
            deletions.append(self.__client.last_response)
    assert_not_equal(0, len(deletions), message='Delete node list empty!')
    for response in deletions:
        assert_equal(200, response.status, message=response.reason)
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException, Nodes().nodes_identifier_delete, 'fooey')
def redfish_emc_catalogs_test(self):
    """ Testing EMC Redfish Service Catalog """
    for node in self.__nodes:
        node_id = node.get('id')
        assert_is_not_none(node_id)
        Nodes().nodes_identifier_catalogs_get(node_id)
        catalog = self.__get_data()
        # The catalog itself and each entry within it must be non-empty.
        assert_not_equal(len(catalog), 0, message='EMC Redfish Catalog size failure')
        for entry in catalog:
            assert_not_equal(len(entry), 0, message='Unexpected EMC Catalog data size')
def test_node_workflows_del_active(self):
    """Testing node DELETE:id/workflows/active

    For each compute node: start a Discovery graph, then delete the active
    workflow, retrying (up to 5 times) when the delete races the graph and
    answers 404.
    """
    Nodes().nodes_get()
    nodes = self.__get_data()
    for n in nodes:
        if n.get('type') == 'compute':
            id = n.get('id')
            timeout = 5
            done = False
            while timeout > 0 and done == False:
                # Kick off a graph so there is something active to delete.
                if 0 == self.__post_workflow(id, 'Graph.Discovery', {}):
                    fail('Timed out waiting for graph to start!')
                try:
                    Nodes().nodes_identifier_workflows_active_delete(id)
                    done = True
                except rest.ApiException as e:
                    if e.status != 404:
                        # FIX: bare 'raise' preserves the original traceback;
                        # 'raise e' re-raised from here and discarded it.
                        raise
                timeout -= 1
    # Deleting against a bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
        Nodes().nodes_identifier_workflows_active_delete,
        'fooey')
def __post_workflow(self, id, graph_name, data):
    """Post *graph_name* to node *id* unless a workflow is already pending/running,
    then wait (up to ~20s) for it to start.

    Returns the remaining timeout budget; 0 means the graph never reached
    'pending' or 'running'.
    """
    current = self.__get_workflow_status(id)
    if current not in ('pending', 'running'):
        Nodes().nodes_identifier_workflows_post(id, graph_name, body=data)
    timeout = 20
    while current not in ('pending', 'running') and timeout != 0:
        LOG.warning('Workflow status for Node {0} (status={1},timeout={2})'.format(id, current, timeout))
        current = self.__get_workflow_status(id)
        sleep(1)
        timeout -= 1
    return timeout
def test_node_workflows_post(self):
    """Testing node POST:id/workflows"""
    Nodes().nodes_get()
    raw_bodies = []
    for node in loads(self.__client.last_response.data):
        if node.get('type') == 'compute':
            Nodes().nodes_identifier_workflows_post(node.get('id'), 'Graph.Discovery', body={})
            # Keep the raw response body; parsed during the assertion pass.
            raw_bodies.append(self.__client.last_response.data)
    for body in raw_bodies:
        assert_not_equal(0, len(loads(body)), message='No Workflows found for Node')
    # A bogus node id must raise an ApiException.
    assert_raises(rest.ApiException,
                  Nodes().nodes_identifier_workflows_post,
                  'fooey', 'Graph.Discovery', body={})