def test_redfish_v1_taskservice_task_count_per_node(self):
    """Verify the advertised task count equals the Members list length per node.

    The API /redfish/v1/TaskService/Oem/Tasks/<id> returns both a Members
    array and an odata count annotation; the two must agree for every
    discovered node.
    """
    if fit_common.VERBOSITY >= 2:
        print("\n\t{0}".format(
            "Description: Check the reported task count in the list of Tasks for each System"))
    nodelist = fit_common.node_select()
    self.assertNotEqual(nodelist, [], 'No Nodes reported for this stack.')
    for node in nodelist:
        on_data = fit_common.rackhdapi(url_cmd="/redfish/v1/TaskService/Oem/Tasks/" + node)
        tasklist = []
        if on_data['status'] == 200:
            # Collect the task ids reported in the Members array.
            tasklist = [member['Id'] for member in on_data['json']["Members"]]
        taskcount = int(on_data['json']['*****@*****.**'])
        listcount = len(tasklist)
        self.assertEqual(
            taskcount, listcount,
            "Reported task count {0} not equal length of tasklist {1}".format(taskcount, listcount))
        if fit_common.VERBOSITY >= 2:
            print("\tNodeID: {0} Number of tasks reported {1}".format(node, taskcount))
def setUpClass(cls):
    """Select one discovered node at random and cancel its active workflows."""
    # Get the list of nodes
    NODECATALOG = fit_common.node_select()
    # Guard against an empty catalog: random.randint(0, -1) would raise an
    # unhelpful ValueError.  Fail with a clear message instead, consistent
    # with the other setUpClass implementations in this suite.
    assert (len(NODECATALOG) != 0), "There are no nodes currently discovered"
    # Select one node at random
    cls.__NODE = NODECATALOG[random.randint(0, len(NODECATALOG) - 1)]
    # delete active workflows for specified node
    fit_common.cancel_active_workflows(cls.__NODE)
def test_02_get_catalog_source(self):
    """Display the catalog sources reported for every discovered node.

    Fixes: Python-2-only print statements replaced with single-argument
    print() calls (identical output on both Python 2 and 3), and the
    manual while/index loops replaced with direct iteration.
    """
    print("============== Displaying Catalog Sources")
    nodes = fit_common.node_select()
    if len(nodes) == 0:
        print("No Nodes found on RackHD server ")
    else:
        for nn in nodes:
            print("")
            print("Node: " + nn)
            monurl = "/api/2.0/nodes/" + nn + "/catalogs"
            mondata = fit_common.rackhdapi(monurl)
            catalog = mondata['json']
            result = mondata['status']
            if result != 200:
                print("Error: failed catalog request")
            else:
                # Each catalog entry names the tool that produced it.
                for entry in catalog:
                    print("Source: " + entry["source"])
def test_01_get_product_info(self):
    """Display DMI product information (ID, name, serial, UUID) for every node.

    Fixes: Python-2-only print statements replaced with single-argument
    print() calls (same output on Python 2 and 3), and the while/index
    loop replaced with direct iteration.
    """
    print("============== Displaying Product Info")
    nodes = fit_common.node_select()
    if len(nodes) == 0:
        print("No Nodes found on RackHD server ")
    else:
        for nn in nodes:
            print("Node: " + nn)
            monurl = "/api/2.0/nodes/" + nn + "/catalogs/dmi"
            mondata = fit_common.rackhdapi(monurl)
            catalog = mondata['json']
            result = mondata['status']
            if result != 200:
                print("Error on catalog/dmi command")
            else:
                # Check BMC IP vs OBM IP setting
                print(" ID: " + catalog["id"])
                print(" Product Name : " + catalog["data"]["System Information"]["Product Name"])
                print(" Serial Number: " + catalog["data"]["System Information"]["Serial Number"])
                print(" UUID : " + catalog["data"]["System Information"]["UUID"])
def test04_power_on_nodes(self):
    """Power on compute nodes ahead of discovery.

    BareMetal test beds use a telnet-controlled power strip that is not yet
    automated, so on those beds this test just waits for the nodes to come
    up.  Otherwise power is applied through a supported PDU, or via IPMI
    when no PDU is available.
    """
    baremetal_testbed = True
    if baremetal_testbed:
        # CDU/telnet power control is not automated yet; assume the nodes
        # were powered on outside this script and give them time to boot.
        log.info(" *** CDU power control not implemented yet, assuming nodes powered on outside this script")
        log.info(" *** Sleeping 90 seconds, allow physical nodes to power on")
        time.sleep(90)
        return
    # This powers on nodes via PDU or, if no PDU, power cycles nodes via IPMI
    # to start discovery.
    if pdu_lib.check_pdu_type() != "Unknown":
        # ServerTech PDU case
        log.info_5('**** PDU found, powering on PDU outlets')
        self.assertTrue(pdu_lib.pdu_control_compute_nodes("on"), 'Failed to power on all outlets')
        # Give the outlets ~30 seconds to energize and the nodes time to DHCP.
        fit_common.countdown(30)
    else:
        log.info_5('**** No supported PDU found, restarting nodes using IPMI.')
        # Skip the IPMI reset entirely when discovery already happened.
        nodes = fit_common.node_select()
        if nodes:
            log.info_5(" Nodes already discovered, skipping ipmi power reset")
            log.info_5(" %s", nodes)
        elif fit_common.power_control_all_nodes("off") == 0:
            # No BMC addresses visible: warn and carry on without a restart.
            log.info_5('**** No BMC IP addresses found in arp table, continuing without node restart.')
        else:
            # power on all nodes under any circumstances
            fit_common.power_control_all_nodes("on")
def setUpClass(cls):
    """Pick a random discovered node, log its identity and BMC details, and
    cancel any workflows still running against it."""
    node_catalog = fit_common.node_select()
    assert (len(node_catalog) != 0), "There are no nodes currently discovered"
    # Select one node at random
    cls.__NODE = node_catalog[random.randint(0, len(node_catalog) - 1)]
    # Fetch node identity and its BMC catalog for logging.
    nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + cls.__NODE)['json']
    nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
    mondata = fit_common.rackhdapi("/api/2.0/nodes/" + cls.__NODE + "/catalogs/bmc", action="get")
    catalog = mondata['json']
    if mondata['status'] != 200:
        log.info_1(" Node ID: " + cls.__NODE)
        log.info_1(" Error on catalog/bmc command")
    else:
        log.info_1(" Node ID: " + cls.__NODE)
        log.info_1(" Node SKU: " + nodesku)
        log.info_1(" Node BMC Mac: %s", catalog.get('data')['MAC Address'])
        log.info_1(" Node BMC IP Addr: %s", catalog.get('data')['IP Address'])
        log.info_1(" Node BMC IP Addr Src: %s", catalog.get('data')['IP Address Source'])
    # delete active workflows for specified node
    assert fit_common.cancel_active_workflows(cls.__NODE) is True, \
        "There are still some active workflows running against the node"
def test_api_20_lookups_post_get_delete(self):
    """Round-trip a lookup record through the 2.0 API: create, verify, delete."""
    node = fit_common.node_select()[0]
    data_payload = {
        "macAddress": "00:0a:0a:0a:0a:0a",
        "ipAddress": "128.128.128.128",
        "node": node
    }
    # Create the lookup entry; 2.0 reports 201 on creation.
    created = fit_common.rackhdapi("/api/2.0/lookups", action="post", payload=data_payload)
    self.assertEqual(created['status'], 201,
                     'Incorrect HTTP return code, expected 201, got:' + str(created['status']))
    lookup_id = created['json']['id']
    # Fetch it back and confirm every field round-tripped.
    fetched = fit_common.rackhdapi("/api/2.0/lookups/" + lookup_id)
    self.assertEqual(fetched['status'], 200,
                     'Incorrect HTTP return code, expected 200, got:' + str(fetched['status']))
    body = fetched['json']
    self.assertEqual(body['macAddress'], "00:0a:0a:0a:0a:0a", "Bad lookup MAC Address")
    self.assertEqual(body['ipAddress'], "128.128.128.128", "Bad lookup IP Address")
    self.assertEqual(body['node'], node, "Bad lookup node ID")
    # Remove the entry again; delete reports 204 No Content.
    deleted = fit_common.rackhdapi("/api/2.0/lookups/" + lookup_id, action="delete")
    self.assertEqual(deleted['status'], 204,
                     'Incorrect HTTP return code, expected 204, got:' + str(deleted['status']))
def setUpClass(cls):
    """Choose one discovered node at random, report its identity/BMC info,
    and ensure no workflows remain active against it."""
    candidates = fit_common.node_select()
    assert (len(candidates) != 0), "There are no nodes currently discovered"
    # Select one node at random
    cls.__NODE = candidates[random.randint(0, len(candidates) - 1)]
    # Look up identity and BMC catalog details so the log records which
    # physical node this run exercised.
    nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + cls.__NODE)['json']
    nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
    mondata = fit_common.rackhdapi("/api/2.0/nodes/" + cls.__NODE + "/catalogs/bmc", action="get")
    catalog = mondata['json']
    bmcresult = mondata['status']
    # The node id is logged in both the success and the failure branch.
    log.info_1(" Node ID: " + cls.__NODE)
    if bmcresult != 200:
        log.info_1(" Error on catalog/bmc command")
    else:
        log.info_1(" Node SKU: " + nodesku)
        bmcdata = catalog.get('data')
        log.info_1(" Node BMC Mac: %s", bmcdata['MAC Address'])
        log.info_1(" Node BMC IP Addr: %s", bmcdata['IP Address'])
        log.info_1(" Node BMC IP Addr Src: %s", bmcdata['IP Address Source'])
    # delete active workflows for specified node
    result = fit_common.cancel_active_workflows(cls.__NODE)
    assert (result is True), "There are still some active workflows running against the node"
def test_redfish_v1_taskservice_task_count_per_node(self):
    """Check that the reported task count matches the Members list per node.

    /redfish/v1/TaskService/Oem/Tasks/<id> returns a Members array and an
    odata count annotation; both must agree for every discovered node.
    """
    if fit_common.VERBOSITY >= 2:
        print("\n\t{0}".format(
            "Description: Check the reported task count in the list of Tasks for each System"))
    nodelist = fit_common.node_select()
    self.assertNotEqual(nodelist, [], 'No Nodes reported for this stack.')
    for node in nodelist:
        on_data = fit_common.rackhdapi(url_cmd="/redfish/v1/TaskService/Oem/Tasks/" + node)
        tasklist = []
        if on_data['status'] == 200:
            # A task id is the trailing path segment of each member's @odata.id.
            tasklist = [m['@odata.id'].split('/')[-1] for m in on_data['json']["Members"]]
        taskcount = int(on_data['json']['*****@*****.**'])
        listcount = len(tasklist)
        self.assertEqual(
            taskcount, listcount,
            "Reported task count {0} not equal length of tasklist {1}".format(taskcount, listcount))
        if fit_common.VERBOSITY >= 2:
            print("\tNodeID: {0} Number of tasks reported {1}".format(node, taskcount))
def test_refresh_compute_nodes(self):
    """Kick off an immediate refresh-discovery workflow on every discovered node.

    Fix: the original posted each workflow without inspecting the response,
    so a failed POST went unnoticed and the test could never fail.  A
    successful workflow POST returns HTTP 201.
    """
    nodelist = fit_common.node_select()
    for node in nodelist:
        payload = {
            "name": "Graph.Refresh.Immediate.Discovery",
            "options": {"create-default-pollers": {"nodeId": node}}
        }
        result = fit_common.rackhdapi("/api/2.0/nodes/" + node + "/workflows",
                                      action="post", payload=payload)
        self.assertEqual(result['status'], 201,
                         'Incorrect HTTP return code, expected 201, got:' + str(result['status']))
def test09_install_obm_credentials(self):
    """Install IPMI OBM credentials on every node via custom workflows (1.1 API).

    One Graph.Obm.Ipmi.CreateSettings<N> graph is registered per credential
    set from the global config, then each graph is run against each node
    until one credential set succeeds.

    Fixes: Python-2-only print statements replaced with single-argument
    print() calls (identical output on Python 2 and 3); comment typo
    "greate" corrected.
    """
    print("**** Install OBM credentials.")
    # install OBM credentials via workflows
    count = 0
    for creds in fit_common.GLOBAL_CONFIG['credentials']['bmc']:
        # create graph for setting OBM credentials
        payload = {
            "friendlyName": "IPMI" + str(count),
            "injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count),
            "options": {
                "obm-ipmi-task": {
                    "user": creds["username"],
                    "password": creds["password"]
                }
            },
            "tasks": [
                {
                    "label": "obm-ipmi-task",
                    "taskName": "Task.Obm.Ipmi.CreateSettings"
                }
            ]
        }
        api_data = fit_common.rackhdapi("/api/1.1/workflows", action="put", payload=payload)
        self.assertEqual(api_data['status'], 200,
                         'Incorrect HTTP return code, expecting 200, got ' + str(api_data['status']))
        count += 1
    print("**** Configure node OBM settings.")
    # run each OBM credential workflow on each node until success
    nodelist = fit_common.node_select()
    for node in nodelist:
        for num in range(0, count):
            status = ""
            workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)}
            # wait for existing workflow to complete
            for dummy in range(0, MAX_CYCLES):
                result = fit_common.rackhdapi("/api/1.1/nodes/" + node + "/workflows",
                                              action="post", payload=workflow)
                if result['status'] != 201:
                    fit_common.time.sleep(5)
                else:
                    break
            # wait for OBM workflow to complete
            for dummy in range(0, MAX_CYCLES):
                fit_common.time.sleep(10)
                status = fit_common.rackhdapi(
                    "/api/1.1/workflows/" + result['json']["instanceId"])['json']['_status']
                if status != "running" and status != "pending":
                    break
            if status == "succeeded":
                break
def test_api_11_lookups_post_get_delete(self):
    """Round-trip a lookup record through the 1.1 API: create, verify, remove."""
    node = fit_common.node_select()[0]
    data_payload = {
        "macAddress": "00:0a:0a:0a:0a:0a",
        "ipAddress": "128.128.128.128",
        "node": node
    }
    # Create the lookup entry.
    posted = fit_common.rackhdapi("/api/1.1/lookups", action="post", payload=data_payload)
    self.assertEqual(posted['status'], 200,
                     'Incorrect HTTP return code, expected 200, got:' + str(posted['status']))
    lookup_id = posted['json']['id']
    # Fetch it back and confirm every stored field.
    fetched = fit_common.rackhdapi("/api/1.1/lookups/" + lookup_id)
    self.assertEqual(fetched['status'], 200,
                     'Incorrect HTTP return code, expected 200, got:' + str(fetched['status']))
    body = fetched['json']
    self.assertEqual(body['macAddress'], "00:0a:0a:0a:0a:0a", "Bad lookup MAC Address")
    self.assertEqual(body['ipAddress'], "128.128.128.128", "Bad lookup IP Address")
    self.assertEqual(body['node'], node, "Bad lookup node ID")
    # Remove the entry again.
    deleted = fit_common.rackhdapi("/api/1.1/lookups/" + lookup_id, action="delete")
    self.assertEqual(deleted['status'], 200,
                     'Incorrect HTTP return code, expected 200, got:' + str(deleted['status']))
def setUpClass(cls):
    """Build the rediscovery payload template and select a random test node."""
    # class method run once per script
    # default base payload for Rediscovery Graph.
    # Every "NODEID" placeholder below is a template value; it is expected
    # to be substituted with the selected node id before the graph is posted.
    cls.__payload = {
        "name": "Graph.Refresh.Immediate.Discovery",
        "options": {
            "reset-at-start": {
                "nodeId": "NODEID"
            },
            "discovery-refresh-graph": {
                "graphOptions": {
                    "target": "NODEID"
                },
                "nodeId": "NODEID"
            },
            "generate-sku": {
                "nodeId": "NODEID"
            },
            "generate-enclosure": {
                "nodeId": "NODEID"
            },
            "create-default-pollers": {
                "nodeId": "NODEID"
            },
            "run-sku-graph": {
                "nodeId": "NODEID"
            },
            "nodeId": "NODEID"
        }
    }
    # Get the list of nodes (excluding nodes of unknown type)
    nodelist = fit_common.node_select(no_unknown_nodes=True)
    assert (len(nodelist) != 0), "No valid nodes discovered"
    # Select one node at random
    cls.__nodeid = nodelist[random.randint(0, len(nodelist) - 1)]
    # Delete active workflows for specified node
    fit_common.cancel_active_workflows(cls.__nodeid)
    # Sentinel: no previous IPMI user captured yet.
    # NOTE(review): presumably consulted later to restore credentials — confirm.
    cls.__previous_ipmi_user = None
def test_redfish_v1_taskservice_tasks_per_node(self):
    """List and display every task associated with each managed system.

    The API TaskService/Oem/Tasks/<systemid> will display a list of all
    tasks that are associated with the specified node id for managed
    systems.

    Fix: the original mixed Python-2 print statements and print() calls in
    the same method; unified here to single-argument print() calls, which
    produce identical output on Python 2 and 3.
    """
    if fit_common.VERBOSITY >= 2:
        msg = "Description: Display the list of Tasks for each System"
        print("\n\t{0}".format(msg))
    nodelist = fit_common.node_select()
    if fit_common.VERBOSITY >= 2:
        print("Nodelist: ")
        print(json.dumps(nodelist, indent=4))
    self.assertNotEqual(nodelist, [], 'No Nodes reported for this stack.')
    for node in nodelist:
        tasklist = get_node_tasklist(node)
        self.assertNotEqual(tasklist, [], 'No Tasks listed for node.')
        for taskid in tasklist:
            taskdata = get_taskid_data(taskid)
            if fit_common.VERBOSITY >= 2:
                print_taskid_data(taskid, taskdata)
def test04_power_on_nodes(self):
    """Start discovery by applying power: through a PDU when one is present,
    otherwise by power cycling the nodes over IPMI."""
    if pdu_lib.check_pdu_type() != "Unknown":
        # ServerTech PDU case
        log.info_5('**** PDU found, powering on PDU outlets')
        self.assertTrue(pdu_lib.pdu_control_compute_nodes("on"), 'Failed to power on all outlets')
        # Wait about 30 seconds for the outlets to all come on and nodes to DHCP
        fit_common.countdown(30)
    else:
        # no PDU case
        log.info_5('**** No supported PDU found, restarting nodes using IPMI.')
        # Skip the IPMI reset entirely when discovery already happened.
        nodes = fit_common.node_select()
        if nodes:
            log.info_5(" Nodes already discovered, skipping ipmi power reset")
            log.info_5(" %s", nodes)
        elif fit_common.power_control_all_nodes("off") == 0:
            # No BMC addresses visible in the ARP table: warn and carry on.
            log.info_5('**** No BMC IP addresses found in arp table, continuing without node restart.')
        else:
            # power on all nodes under any circumstances
            fit_common.power_control_all_nodes("on")
def test_01_get_product_info(self):
    """Display DMI product information (ID, name, serial, UUID) for every node.

    Fixes: Python-2-only print statements replaced with single-argument
    print() calls (same output on Python 2 and 3), and the while/index
    loop replaced with direct iteration.
    """
    print("============== Displaying Product Info")
    nodes = fit_common.node_select()
    if len(nodes) == 0:
        print("No Nodes found on RackHD server ")
    else:
        for nn in nodes:
            print("Node: " + nn)
            monurl = "/api/2.0/nodes/" + nn + "/catalogs/dmi"
            mondata = fit_common.rackhdapi(monurl)
            catalog = mondata['json']
            result = mondata['status']
            if result != 200:
                print("Error on catalog/dmi command")
            else:
                # Check BMC IP vs OBM IP setting
                sysinfo = catalog["data"]["System Information"]
                print(" ID: " + catalog["id"])
                print(" Product Name : " + sysinfo["Product Name"])
                print(" Serial Number: " + sysinfo["Serial Number"])
                print(" UUID : " + sysinfo["UUID"])
'''
Copyright 2016, EMC, Inc.

Purpose:
    This test script performs the RedFish API ComputerSystem.Reset and
    verify the task status is correct and the power command occurs
'''
import fit_path  # NOQA: unused import
import time
import json
import fit_common
import test_api_utils
from nose.plugins.attrib import attr

# get list of compute nodes once for this test suite
NODELIST = fit_common.node_select()


def print_taskid_data(taskid, taskid_json):
    """
    Display the task data for the given Redfish task.
    :param taskid: task identifier, used only for display
    :param taskid_json: valid taskid json structure
    """
    # Most fields fall back to "" via .get(); SystemId is nested under
    # ["Oem"]["RackHD"], which is indexed directly.
    # NOTE(review): a missing "Oem" or "RackHD" key would raise KeyError here.
    print "\n\tTaskId: ", taskid
    print "\tSystem ID: ", taskid_json["Oem"]["RackHD"].get('SystemId', "")
    print "\tTask State ", taskid_json.get('TaskState', "")
    print "\tTask Status ", taskid_json.get('TaskStatus', "")
    print "\tStartTime: ", taskid_json.get('StartTime', "")
    print "\tEndTime: ", taskid_json.get('EndTime', "")
    print "\tName: ", taskid_json.get('Name', "")
def test08_install_obm_credentials(self): print "**** Install OBM credentials." # install OBM credentials via workflows count = 0 for creds in fit_common.GLOBAL_CONFIG['credentials']['bmc']: # greate graph for setting OBM credentials payload = \ { "friendlyName": "IPMI" + str(count), "injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count), "options": { "obm-ipmi-task":{ "user": creds["username"], "password": creds["password"] } }, "tasks": [ { "label": "obm-ipmi-task", "taskName": "Task.Obm.Ipmi.CreateSettings" } ] } api_data = fit_common.rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload) self.assertEqual( api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got ' + str(api_data['status'])) count += 1 print "**** Configure node OBM settings." # run each OBM credential workflow on each node until success nodelist = fit_common.node_select() succeeded = True for node in nodelist: for num in range(0, count): status = "" workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)} # wait for existing workflow to complete for dummy in range(0, MAX_CYCLES): result = fit_common.rackhdapi("/api/2.0/nodes/" + node + "/workflows", action="post", payload=workflow) if result['status'] != 201: fit_common.time.sleep(5) else: break # wait for OBM workflow to complete counter = 0 for counter in range(0, MAX_CYCLES): fit_common.time.sleep(10) status = fit_common.rackhdapi( "/api/2.0/workflows/" + result['json']["instanceId"])['json']['_status'] if status != "running" and status != "pending": break if status == "succeeded": break if counter == MAX_CYCLES: succeeded = False print "*** Node failed OBM settings:", node self.assertTrue(succeeded, "OBM settings failed.")
Copyright 2016, EMC, Inc.

Author(s):
George Paulos
'''

import os
import sys
import subprocess
# Locate the repo root via git so the shared test helpers import from any cwd.
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/common")
import fit_common

# Local methods
NODECATALOG = fit_common.node_select()


def _delete_active_tasks(node):
    """Best-effort cancel of any active workflow on *node*.

    Retries the delete up to 9 times, 10 s apart.  Both 204 (deleted) and
    404 (nothing active) count as success; returns False if neither status
    is ever seen.
    """
    for dummy in range(1,10):
        if fit_common.rackhdapi('/api/current/nodes/' + node + '/workflows/active', action='delete')['status'] in [204, 404]:
            return True
        else:
            fit_common.time.sleep(10)
    return False

# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class redfish10_api_systems(fit_common.unittest.TestCase):
    def test_redfish_v1_systems(self):
Copyright 2016, EMC, Inc.

Author(s):
George Paulos
'''

import os
import sys
import subprocess
# Locate the repo root via git so the shared test helpers import from any cwd.
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/common")
import fit_common

# Local methods
MON_NODES = fit_common.node_select()

# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd11_api_catalogs(fit_common.unittest.TestCase):
    def test_api_11_catalogs(self):
        # Whole-catalog listing must return 200 and a non-empty body.
        api_data = fit_common.rackhdapi('/api/1.1/catalogs')
        self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
        self.assertNotEqual(len(api_data['json']), 0, "Error, no catalog")
        for item in api_data['json']:
            # check required fields
            for subitem in ['createdAt', 'node', 'source', 'updatedAt', 'data']:
                if fit_common.VERBOSITY >= 2:
                    print "Checking:", item['id'], subitem
'''
Copyright 2016, EMC, Inc.

Author(s):
George Paulos
'''

import fit_path  # NOQA: unused import
import os
import sys
import subprocess
import fit_common

# Local methods
NODECATALOG = fit_common.node_select()


def _delete_active_tasks(node):
    """Best-effort cancel of any active workflow on *node*.

    Retries the delete up to 9 times, sleeping 10 s between attempts.
    204 (deleted) and 404 (nothing active) both count as success; returns
    False when neither status is ever observed.
    """
    for dummy in range(1, 10):
        if fit_common.rackhdapi('/api/current/nodes/' + node + '/workflows/active', action='delete')['status'] in [204, 404]:
            return True
        else:
            fit_common.time.sleep(10)
    return False

# Select test group here using @attr
from nose.plugins.attrib import attr
'''

import os
import sys
import subprocess
import json
import pprint

# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/common")
import fit_common
import test_api_utils

# Globals
NODELIST = fit_common.node_select()
if NODELIST == []:
    print "No nodes found on stack"
    # NOTE(review): 'exit;' is a bare name expression followed by an empty
    # statement — it does NOT terminate the script; should probably be
    # sys.exit(1).  Left unchanged here.
    exit;

# routine to grab BMC and RMM info
def mon_get_ip_info( node ):
    '''
    This routine will grab the IP information from the compute node
    '''
    # Get RackHD node info
    nodeurl = "/api/1.1/nodes/" + node
    nodedata = fit_common.rackhdapi(nodeurl, action="get")
    nodeinfo = nodedata['json']
    result = nodedata['status']
George Paulos
'''

import os
import sys
import subprocess
# set path to common libraries
# (repo root is located via git so the helpers import from any cwd)
sys.path.append(
    subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/common")
import fit_common

# Local methods
MON_NODES = fit_common.node_select()

# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd11_api_tags(fit_common.unittest.TestCase):
    def test_api_11_nodes_ID_tags(self):
        # iterate through nodes
        for nodeid in MON_NODES:
            # add a per-node tag derived from the node id
            api_data = fit_common.rackhdapi(
                "/api/1.1/nodes/" + nodeid + "/tags",
                action="patch",
                payload={"tags": ["test_tag_" + nodeid]})