def test_02_get_catalog_source(self): print "============== Displaying Catalog Sources" nodes = test_api_utils.monorail_get_node_list(fit_common.fitargs()['ora']) if len(nodes) == 0: print "No Nodes found on Onrack server "+ fit_common.fitargs()['ora'] else: inode=0 while inode<len(nodes): print("") nn=nodes[inode] print "Node: "+nn monurl = "/api/1.1/nodes/"+nn+"/catalogs" mondata = fit_common.rackhdapi(monurl) catalog = mondata['json'] result = mondata['status'] if result != 200: print "Error: failed catalog request" else: i = 0 while i<len(catalog): print "Source: "+catalog[i]["source"] i +=1 inode +=1
def test_02_get_catalog_source(self):
    """Walk every node known to the Monorail API and print the 'source'
    field of each catalog entry attached to it."""
    print "============== Displaying Catalog Sources"
    nodes = test_api_utils.monorail_get_node_list(
        fit_common.fitargs()['ora'])
    if len(nodes) == 0:
        print "No Nodes found on Onrack server " + fit_common.fitargs(
        )['ora']
    else:
        inode = 0
        while inode < len(nodes):
            print("")
            nn = nodes[inode]
            print "Node: " + nn
            monurl = "/api/1.1/nodes/" + nn + "/catalogs"
            mondata = fit_common.rackhdapi(monurl)
            catalog = mondata['json']
            result = mondata['status']
            if result != 200:
                # Non-200 means the catalog listing failed; keep going
                # with the remaining nodes rather than aborting.
                print "Error: failed catalog request"
            else:
                i = 0
                while i < len(catalog):
                    print "Source: " + catalog[i]["source"]
                    i += 1
            inode += 1
def test_01_get_product_info(self):
    """Print DMI product identity (name, serial, UUID) for every node."""
    print "============== Displaying Product Info"
    nodes = test_api_utils.monorail_get_node_list(
        fit_common.fitargs()['ora'])
    if len(nodes) == 0:
        print "No Nodes found on Onrack server " + fit_common.fitargs(
        )['ora']
    else:
        inode = 0
        while inode < len(nodes):
            nn = nodes[inode]
            print "Node: " + nn
            # The 'dmi' catalog carries the SMBIOS system information.
            monurl = "/api/1.1/nodes/" + nn + "/catalogs/dmi"
            mondata = fit_common.rackhdapi(monurl)
            catalog = mondata['json']
            result = mondata['status']
            if result != 200:
                print "Error on catalog/dmi command"
            else:
                # Check BMC IP vs OBM IP setting
                print " ID: " + catalog["id"]
                print " Product Name : " + catalog["data"][
                    "System Information"]["Product Name"]
                print " Serial Number: " + catalog["data"][
                    "System Information"]["Serial Number"]
                print " UUID : " + catalog["data"][
                    "System Information"]["UUID"]
            inode += 1
def _get_serverip(self):
    """Return the image-server IP.

    A command-line override of the form "imageserver=<ip>" in the
    unhandled arguments wins; otherwise fall back to the configured
    default from the FIT config.
    """
    for argument in fit_common.fitargs()['unhandled_arguments']:
        if "imageserver" in argument:
            return argument.split("=")[1]
    return fit_common.fitcfg()["image_service"]["imageserver"]
def _get_serverip(self):
    """Return the image-server IP, preferring a CLI override."""
    # Any unhandled argument containing "imageserver" is expected to
    # look like "imageserver=<ip>"; first match wins.
    args = fit_common.fitargs()['unhandled_arguments']
    for arg in args:
        if "imageserver" in arg:
            serverip = arg.split("=")[1]
            return serverip
    # No override supplied; use the configured default.
    return fit_common.fitcfg()["image_service"]["imageserver"]
def reboot_node(self, vmnum):
    """Reboot VM number `vmnum` and wait for it to answer ping again.

    Returns True once the host responds, False after ~15 attempts.
    """
    # VM 1 is the base ORA host; higher-numbered VMs follow the
    # "ora-<n>" naming convention.
    host = fit_common.fitargs()['rackhd_host']
    if vmnum != 1:
        host = fit_common.fitargs()['rackhd_host'].replace(
            "ora", "ora-" + str(vmnum - 1))
    fit_common.remote_shell('shutdown -r now', vmnum=vmnum)
    time.sleep(3)
    for _attempt in range(15):
        if subprocess.call("ping -c 1 -w 5 " + host, shell=True) == 0:
            return True
        time.sleep(1)
    return False
def _get_tester_ip(self):
    """Return the local IP this test host uses to reach the RackHD host."""
    # Connecting a UDP socket does not send traffic; it only selects a
    # route, so getsockname() yields the outbound interface address.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ip = fit_common.fitargs()['rackhd_host']
    logs.debug("pinging " + ip)
    s.connect((ip, 0))
    logs.debug('My IP address is: ' + s.getsockname()[0])
    return str(s.getsockname()[0])
def test_post_alert_withIP(self):
    """Post a Redfish-style alert and verify it arrives on the AMQP bus."""
    self.updateDb()
    # Background consumer bound to node alert events; times out after
    # 100 s if nothing arrives.
    sel_worker = AmqpWorker(exchange_name="on.events",
                            topic_routing_key="node.alerts.#",
                            external_callback=self.amqp_callback,
                            timeout=100)
    sel_worker.setDaemon(True)
    sel_worker.start()
    # Post the alert twice to guard against a race with consumer startup.
    for z in range(2):
        url = "http://{0}:{1}/api/2.0/notification/alerts".format(
            fit_common.fitargs()["rackhd_host"], HTTP_PORT)
        fit_common.restful(url, rest_action='post',
                           rest_payload=self.payload)
        sleep(1)
        logs.debug_1("Post attempt {0}".format(z))
    logs.debug_1("Wait for the alert on the amqp bus")
    self._wait_amqp_message(100)
    amqp_message = amqp_queue.get()
    logs.debug_1("Dispose the Pika connection")
    sel_worker.dispose()
    logs.debug_1("Validate the content of the amqp msg")
    self.validate(amqp_message)
    logs.debug_1("Done with the alert test")
def __init__(self, exchange_name, topic_routing_key, external_callback,
             timeout=10):
    """AMQP consumer thread: bind an exclusive queue to `exchange_name`
    with `topic_routing_key` and deliver messages to `external_callback`.
    The connection self-disposes after `timeout` seconds."""
    threading.Thread.__init__(self)
    # Align pika's own log chatter with the FIT verbosity level.
    pika_logger = logging.getLogger('pika')
    if fit_common.VERBOSITY >= 8:
        pika_logger.setLevel(logging.DEBUG)
    elif fit_common.VERBOSITY >= 4:
        pika_logger.setLevel(logging.WARNING)
    else:
        pika_logger.setLevel(logging.ERROR)
    # NOTE(review): port is taken from 'amqp_ssl' but no SSL options are
    # passed to ConnectionParameters — confirm the broker side expects that.
    amqp_port = fit_common.fitports()['amqp_ssl']
    self.connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=fit_common.fitargs()["ora"],
                                  port=amqp_port))
    self.channel = self.connection.channel()
    # self.channel.basic_qos(prefetch_count=1)
    # Exclusive, broker-named queue: deleted automatically on disconnect.
    result = self.channel.queue_declare(exclusive=True)
    queue_name = result.method.queue
    self.channel.queue_bind(exchange=exchange_name,
                            queue=queue_name,
                            routing_key=topic_routing_key)
    self.channel.basic_consume(external_callback, queue=queue_name)
    self.connection.add_timeout(timeout, self.dispose)
def _scp_file(self, url):
    """Fetch a file from the RackHD host via scp unless it already exists
    locally; return the local file name."""
    file_name = url.split('/')[-1]
    logs.debug_3("scp file %s from RackHD" % url)
    if not os.path.exists(file_name):
        # Strip the scheme prefix (first 6 chars) to get the remote path.
        # NOTE(review): assumes a fixed-length scheme like "scp://" — confirm.
        path = url[6:]
        rackhd_hostname = fit_common.fitargs()['rackhd_host']
        scp_file = fit_common.fitcreds(
        )['rackhd_host'][0]['username'] + '@{0}:{1}'.format(
            rackhd_hostname, path)
        cmd = 'scp -o StrictHostKeyChecking=no {0} .'.format(scp_file)
        logs.debug_3("scp command : '{0}'".format(cmd))
        # Echo scp output only at the highest verbosity.
        logfile_redirect = None
        if fit_common.VERBOSITY >= 9:
            logfile_redirect = sys.stdout
        # pexpect answers the password prompt non-interactively.
        (command_output, ecode) = pexpect.run(
            cmd,
            withexitstatus=1,
            events={
                '(?i)assword: ':
                fit_common.fitcreds()['rackhd_host'][0]['password'] + '\n'
            },
            logfile=logfile_redirect)
        assert ecode == 0, 'failed "{0}" because {1}. Output={2}'.format(
            cmd, ecode, command_output)
    return file_name
def __init__(self, exchange_name, topic_routing_key, external_callback,
             timeout=10):
    """AMQP consumer thread (vagrant-aware port selection); binds an
    exclusive queue to `exchange_name`/`topic_routing_key` and hands
    messages to `external_callback`. Self-disposes after `timeout` s."""
    threading.Thread.__init__(self)
    # Align pika's own log chatter with the FIT verbosity level.
    pika_logger = logging.getLogger('pika')
    if fit_common.VERBOSITY >= 8:
        pika_logger.setLevel(logging.DEBUG)
    elif fit_common.VERBOSITY >= 4:
        pika_logger.setLevel(logging.WARNING)
    else:
        pika_logger.setLevel(logging.ERROR)
    # API port 9090 indicates a vagrant deployment, which forwards AMQP
    # on a different host port.
    if fit_common.API_PORT == 9090:
        amqp_port = fit_common.fitports()['amqp-vagrant']
    else:
        amqp_port = fit_common.fitports()['amqp']
    self.connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=fit_common.fitargs()["ora"],
                                  port=amqp_port))
    self.channel = self.connection.channel()
    # self.channel.basic_qos(prefetch_count=1)
    # Exclusive, broker-named queue: deleted automatically on disconnect.
    result = self.channel.queue_declare(exclusive=True)
    queue_name = result.method.queue
    self.channel.queue_bind(
        exchange=exchange_name,
        queue=queue_name,
        routing_key=topic_routing_key)
    self.channel.basic_consume(external_callback, queue=queue_name)
    self.connection.add_timeout(timeout, self.dispose)
def _get_tester_ip(self):
    """Return the tester-side IP used to reach the RackHD host.

    A UDP connect sends no packets; it only selects a route, letting
    getsockname() report the outbound interface address.
    """
    target = fit_common.fitargs()['rackhd_host']
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    logs.debug("pinging " + target)
    probe.connect((target, 0))
    local_ip = probe.getsockname()[0]
    logs.debug('My IP address is: ' + local_ip)
    return str(local_ip)
def reboot_node(self, vmnum):
    """Reboot VM `vmnum` and poll by ping until it comes back.

    Returns True if the host answers within ~15 attempts, else False.
    """
    address = ""
    # VM 1 is the base ORA host; higher VMs follow the "ora-<n>" naming.
    if (vmnum == 1):
        address = fit_common.fitargs()['rackhd_host']
    else:
        address = fit_common.fitargs()['rackhd_host'].replace(
            "ora", "ora-" + str(vmnum - 1))
    fit_common.remote_shell('shutdown -r now', vmnum=vmnum)
    # Give the shutdown a moment to take effect before polling.
    time.sleep(3)
    for i in range(0, 15):
        if subprocess.call("ping -c 1 -w 5 " + address, shell=True) == 0:
            return True
        time.sleep(1)
    return False
def test_01_get_product_info(self): print "============== Displaying Product Info" nodes = test_api_utils.monorail_get_node_list(fit_common.fitargs()['ora']) if len(nodes) == 0: print "No Nodes found on Onrack server "+ fit_common.fitargs()['ora'] else: inode=0 while inode<len(nodes): nn=nodes[inode] print "Node: "+nn monurl = "/api/1.1/nodes/"+nn+"/catalogs/dmi" mondata = fit_common.rackhdapi(monurl) catalog = mondata['json'] result = mondata['status'] if result != 200: print "Error on catalog/dmi command" else: # Check BMC IP vs OBM IP setting print " ID: "+catalog["id"] print " Product Name : "+catalog["data"]["System Information"]["Product Name"] print " Serial Number: "+catalog["data"]["System Information"]["Serial Number"] print " UUID : "+catalog["data"]["System Information"]["UUID"] inode +=1
def test01_install_ova_template(self):
    """Validate tooling and environment, then deploy `numvms` copies of
    the OVA template onto the ESXi hypervisor."""
    ovafile = fit_common.fitargs()['template']
    numvms = int(fit_common.fitargs()['numvms'])
    # Check for ovftool
    self.assertEqual(
        fit_common.subprocess.call('which ovftool', shell=True), 0,
        "FAILURE: 'ovftool' not installed.")
    # Ping for valid ESXi host
    self.assertEqual(
        fit_common.subprocess.call('ping -c 1 ' +
                                   fit_common.fitargs()['hyper'],
                                   shell=True), 0,
        "FAILURE: ESXi hypervisor not found.")
    # Run probe to check for valid OVA file
    rc = fit_common.subprocess.call("ovftool " + ovafile, shell=True)
    self.assertEqual(rc, 0, 'Invalid or missing OVA file: ' + ovafile)
    # check for number of virtual machine
    self.assertTrue(numvms < 100,
                    "Number of vms should not be greater than 99")
    # Shutdown previous ORA
    if fit_common.subprocess.call('ping -c 1 ' +
                                  fit_common.fitargs()['rackhd_host'],
                                  shell=True) == 0:
        fit_common.remote_shell('shutdown -h now')
        fit_common.time.sleep(5)
    # this clears the hypervisor ssh key from ~/.ssh/known_hosts
    subprocess.call([
        "touch ~/.ssh/known_hosts;ssh-keygen -R " +
        fit_common.fitargs()['hyper'] +
        " -f ~/.ssh/known_hosts >/dev/null 2>&1"
    ],
                    shell=True)
    # Find correct hypervisor credentials by testing each entry in the list
    cred_list = fit_common.fitcreds()['hyper']
    for entry in cred_list:
        uname = entry['username']
        passwd = entry['password']
        # A successful no-op ssh ("pwd") proves this credential pair works.
        (command_output, exitstatus) = \
            fit_common.pexpect.run(
                "ssh -q -o StrictHostKeyChecking=no -t " + uname + "@" +
                fit_common.fitargs()['hyper'] + " pwd",
                withexitstatus=1,
                events={"assword": passwd + "\n"},
                timeout=20, logfile=None)
        if exitstatus == 0:
            break
    # Run OVA installer
    for vm in range(0, numvms):
        self.deploy_ova(vm, uname, passwd, numvms, ovafile)
def get_switches():
    """Return the node IDs to test.

    If the user supplied a nodeid on the command line, use only that
    (no validity check is performed); otherwise return the IDs of all
    nodes of type 'switch'.
    """
    nodeid = fit_common.fitargs()['nodeid']
    if nodeid != 'None':
        return [nodeid]
    catalog = fit_common.rackhdapi('/api/2.0/nodes')
    return [entry['id'] for entry in catalog['json']
            if entry['type'] == 'switch']
def _get_switches():
    """Return the list of switch node IDs to run against."""
    # returns a list with valid node IDs that match ARGS_LIST.sku in 'Name' or 'Model' field
    # and matches node BMC MAC address in ARGS_LIST.obmmac if specified
    # Otherwise returns list of all IDs that are not 'Unknown' or 'Unmanaged'
    nodelist = []
    # check if user specified a single nodeid to run against
    # user must know the nodeid and any check for a valid nodeid is skipped
    nodeid = fit_common.fitargs()['nodeid']
    if nodeid != 'None':
        nodelist.append(nodeid)
    else:
        # Collect every node the API reports as type 'switch'.
        catalog = fit_common.rackhdapi('/api/2.0/nodes')
        for nodeentry in catalog['json']:
            if nodeentry['type'] == 'switch':
                nodelist.append(nodeentry['id'])
    return nodelist
def _scp_file(self, url):
    """Fetch a file from the RackHD host via scp unless it already exists
    locally; return the local file name.

    Raises AssertionError if the scp command exits non-zero.
    """
    file_name = url.split('/')[-1]
    logs.debug_3("scp file %s from RackHD" % url)
    if not os.path.exists(file_name):
        # Strip the scheme prefix (first 6 chars) to get the remote path.
        # NOTE(review): assumes a fixed-length scheme like "scp://" — confirm.
        path = url[6:]
        rackhd_hostname = fit_common.fitargs()['rackhd_host']
        scp_file = fit_common.fitcreds()['rackhd_host'][0]['username'] + '@{0}:{1}'.format(rackhd_hostname, path)
        cmd = 'scp -o StrictHostKeyChecking=no {0} .'.format(scp_file)
        logs.debug_3("scp command : '{0}'".format(cmd))
        # BUG FIX: logfile_redirect was only assigned when VERBOSITY >= 9,
        # raising NameError at lower verbosity levels. Default it to None
        # (no output redirection), matching the sibling implementation.
        logfile_redirect = None
        if fit_common.VERBOSITY >= 9:
            logfile_redirect = sys.stdout
        # pexpect answers the password prompt non-interactively.
        (command_output, ecode) = pexpect.run(
            cmd,
            withexitstatus=1,
            events={'(?i)assword: ': fit_common.fitcreds()['rackhd_host'][0]['password'] + '\n'},
            logfile=logfile_redirect)
        assert ecode == 0, 'failed "{0}" because {1}. Output={2}'.format(cmd, ecode, command_output)
    return file_name
def __init__(self, exchange_name, topic_routing_key, external_callback,
             timeout=10):
    """AMQP consumer thread using the module-level AMQP_PORT; binds an
    exclusive queue to `exchange_name`/`topic_routing_key` and delivers
    messages to `external_callback`. Self-disposes after `timeout` s."""
    threading.Thread.__init__(self)
    # Align pika's own log chatter with the FIT verbosity level.
    pika_logger = logging.getLogger('pika')
    if fit_common.VERBOSITY >= 8:
        pika_logger.setLevel(logging.DEBUG)
    elif fit_common.VERBOSITY >= 4:
        pika_logger.setLevel(logging.WARNING)
    else:
        pika_logger.setLevel(logging.ERROR)
    self.connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=fit_common.fitargs()["rackhd_host"],
                                  port=AMQP_PORT))
    self.channel = self.connection.channel()
    # Exclusive, broker-named queue: deleted automatically on disconnect.
    result = self.channel.queue_declare(exclusive=True)
    queue_name = result.method.queue
    self.channel.queue_bind(exchange=exchange_name,
                            queue=queue_name,
                            routing_key=topic_routing_key)
    self.channel.basic_consume(external_callback, queue=queue_name)
    self.connection.add_timeout(timeout, self.dispose)
def changeRackHDPasswd(self, passwd, token):
    """PATCH this user's password on the RackHD API.

    Returns the HTTP status code, or None when the request itself fails
    (connection error, timeout, ...). A non-200 status is logged but
    still returned to the caller.
    """
    headers = {
        'content-type': 'application/json',
        'accept': 'application/json',
        'authorization': token
    }
    payload = {'role': self.role, 'password': passwd}
    url = "https://{0}:{1}/api/2.0/users/{2}".format(
        fit_common.fitargs()['rackhd_host'],
        str(fit_common.fitports()['https']),
        self.username)
    try:
        # verify=False: the test stack uses a self-signed certificate.
        r = requests.patch(url, headers=headers, data=json.dumps(payload),
                           timeout=User.timeout, verify=False)
    except requests.exceptions.RequestException as e:
        logs.info("error, {0}".format(e))
        return None
    if r.status_code != 200:
        logs.info("error status code {0}, unable to change user password".format(r.status_code))
    return r.status_code
def test01_set_auth_user(self):
    """Create the first admin user on RackHD, trying the HTTPS API
    directly and falling back to posting auth.json over ssh."""
    # NOTE(review): the JSON literal below appears redacted/garbled in
    # this copy of the source ("******" where credential lookups were);
    # verify against version control before relying on it.
    auth_json = open('auth.json', 'w')
    auth_json.write('{"username":"******"api"][0]["admin_user"] + '", "password":"******"api"][0]["admin_pass"] + '", "role":"Administrator"}')
    auth_json.close()
    try:
        # add first user to remote rackhd directly
        return_code = ""
        rackhd_hostname = fit_common.fitargs()['rackhd_host']
        set_auth_url = "https://" + rackhd_hostname + ":" + str(fit_common.fitports()['https']) + "/api/2.0/users"
        rc = fit_common.restful(url_command=set_auth_url, rest_action="post",
                                rest_payload=json.load(open('auth.json')))
        return_code = str(rc['status'])
    except Exception as e:
        # NOTE(review): `log` (not `logs`) is used here — presumably a
        # module-level logger defined elsewhere in this file; confirm.
        log.info_5("ALERT: RackHD is not in localhost, will set first user through ssh{0}".format(e))
    if return_code != '201':
        log.info_5("ALERT: Can't set first user to RackHD https port directly, will set it through ssh")
        # ssh login to rackhd and add first user to localhost rackhd
        fit_common.remote_shell('rm auth.json')
        fit_common.scp_file_to_ora('auth.json')
        rc = fit_common.remote_shell("curl -ks -X POST -H 'Content-Type:application/json' https://localhost:" +
                                     str(fit_common.fitports()['https']) + "/api/2.0/users -d @auth.json")
        if rc['exitcode'] != 0:
            log.info_5("ALERT: Auth admin user not set! Please manually set the admin user account if required.")
def test_post_alert_withIP(self):
    """Post an alert to the notification API and confirm it shows up on
    the AMQP bus under node.alerts.#."""
    self.updateDb()
    # Daemonized consumer; will self-dispose after 100 s of silence.
    worker = AmqpWorker(
        exchange_name="on.events",
        topic_routing_key="node.alerts.#",
        external_callback=self.amqp_callback,
        timeout=100)
    worker.setDaemon(True)
    worker.start()
    # Post twice to guard against a race with consumer startup.
    for attempt in range(2):
        url = "http://{0}:{1}/api/2.0/notification/alerts".format(
            fit_common.fitargs()["rackhd_host"], HTTP_PORT)
        fit_common.restful(url, rest_action='post', rest_payload=self.payload)
        sleep(1)
        logs.debug_1("Post attempt {0}".format(attempt))
    logs.debug_1("Wait for the alert on the amqp bus")
    self._wait_amqp_message(100)
    message = amqp_queue.get()
    logs.debug_1("Dispose the Pika connection")
    worker.dispose()
    logs.debug_1("Validate the content of the amqp msg")
    self.validate(message)
    logs.debug_1("Done with the alert test")
def test01_install_ova_template(self):
    """Validate tooling and environment, then deploy `numvms` copies of
    the OVA template onto the ESXi hypervisor."""
    ovafile = fit_common.fitargs()['template']
    numvms = int(fit_common.fitargs()['numvms'])
    # Check for ovftool
    self.assertEqual(fit_common.subprocess.call('which ovftool', shell=True), 0,
                     "FAILURE: 'ovftool' not installed.")
    # Ping for valid ESXi host
    self.assertEqual(fit_common.subprocess.call('ping -c 1 ' + fit_common.fitargs()['hyper'], shell=True), 0,
                     "FAILURE: ESXi hypervisor not found.")
    # Run probe to check for valid OVA file
    rc = fit_common.subprocess.call("ovftool " + ovafile, shell=True)
    self.assertEqual(rc, 0, 'Invalid or missing OVA file: ' + ovafile)
    # check for number of virtual machine
    self.assertTrue(numvms < 100, "Number of vms should not be greater than 99")
    # Shutdown previous ORA
    if fit_common.subprocess.call('ping -c 1 ' + fit_common.fitargs()['rackhd_host'], shell=True) == 0:
        fit_common.remote_shell('shutdown -h now')
        fit_common.time.sleep(5)
    # this clears the hypervisor ssh key from ~/.ssh/known_hosts
    subprocess.call(["touch ~/.ssh/known_hosts;ssh-keygen -R " + fit_common.fitargs()['hyper'] +
                     " -f ~/.ssh/known_hosts >/dev/null 2>&1"], shell=True)
    # Find correct hypervisor credentials by testing each entry in the list
    cred_list = fit_common.fitcreds()['hyper']
    for entry in cred_list:
        uname = entry['username']
        passwd = entry['password']
        # A successful no-op ssh ("pwd") proves this credential pair works.
        (command_output, exitstatus) = \
            fit_common.pexpect.run(
                "ssh -q -o StrictHostKeyChecking=no -t " + uname + "@" +
                fit_common.fitargs()['hyper'] + " pwd",
                withexitstatus=1,
                events={"assword": passwd + "\n"},
                timeout=20, logfile=None)
        if exitstatus == 0:
            break
    # Run OVA installer
    for vm in range(0, numvms):
        self.deploy_ova(vm, uname, passwd, numvms, ovafile)
def __init__(self):
    """Work out which local IP address identifies the test host.

    Strategy: first ask the OS which source address it would use to
    reach the RackHD host. If that is a real (non-loopback) address,
    use it. Otherwise we are running on the DUT or behind loopback, so
    scan all interfaces and pick the most plausible candidate in
    preference order: eth/en > docker > vbox > veth > extras > the
    RackHD control network itself.
    """
    self.__alleged_testhost_ip = None
    # UDP connect sends no traffic; it only selects a route so that
    # getsockname() reports the outbound interface address.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ip = fit_common.fitargs()['rackhd_host']
    monip = fit_common.fitcfg()["rackhd-config"]["apiServerAddress"]
    monip_obj = ipaddress.ip_address(monip)
    logs.irl.debug('Trying to determine testhost IP address. Hitting rackhd_host value %s first', ip)
    s.connect((ip, 0))
    logs.debug(' ip used to generate connection to %s was %s: ', ip, s.getsockname()[0])
    alleged_testhost_ip_str = s.getsockname()[0]
    # python2/3 flake handling. The 'unicode' keyword is gone from p3. However, although
    # our code is p2, hound uses p3. We can cover both by using the -type- of a unicode string!
    ucode_type = type(u'unicode_string_to_type')
    alleged_testhost_ip = ipaddress.ip_address(ucode_type(alleged_testhost_ip_str))
    if not alleged_testhost_ip.is_loopback:
        # A non-loopback address is about the best guess we can get. Use it.
        logs.irl.debug(' ip used to generate connection to %s is non-loopback. Using %s',
                       ip, alleged_testhost_ip_str)
        self.__alleged_testhost_ip = alleged_testhost_ip_str
        return
    # Localhost. Great. We are either running on the DUT or are on a test-host.
    # In either case, grabbing pretty much any ip interface that isn't a loop back
    # should do the trick.
    docker_net = []
    mono_net = []
    eform_net = []
    vbox_net = []
    veth_net = []
    extras_net = []
    int_list = netifaces.interfaces()
    for interface in int_list:
        logs.irl.debug(' checking interface %s', interface)
        ifaddrs = netifaces.ifaddresses(interface)
        if netifaces.AF_INET not in ifaddrs:
            logs.irl.debug(' -- no ifaddrs on it, skipping')
        else:
            # Classify every IPv4 address on this interface into one of
            # the candidate buckets above.
            for net in ifaddrs[netifaces.AF_INET]:
                logs.irl.debug(' checking %s on %s', net, interface)
                addr = net['addr']
                mask = net['netmask']
                inet_form = u'{}/{}'.format(addr, mask)
                this_iface = ipaddress.ip_interface(inet_form)
                # Remember which interface this address came from.
                this_iface.on_name = interface
                dispo = None
                if this_iface.is_loopback:
                    dispo = 'loopback-skip'
                elif monip_obj in this_iface.network:
                    # really the last choice, all things considered!
                    dispo = 'added to control-network-list'
                    mono_net.append(this_iface)
                elif 'docker' in interface:
                    dispo = 'added to docker list'
                    docker_net.append(this_iface)
                elif interface.startswith('vbox'):
                    dispo = 'added to vbox list'
                    vbox_net.append(this_iface)
                elif interface.startswith('veth'):
                    dispo = 'added to veth list'
                    veth_net.append(this_iface)
                elif interface.startswith('eth') or interface.startswith('en'):
                    dispo = 'added to en/eth list'
                    eform_net.append(this_iface)
                else:
                    logs.irl.debug('unknown interface type-ish %s seen', interface)
                    dispo = 'added to extras list'
                    extras_net.append(this_iface)
                logs.irl.debug(' -> %s', dispo)
    # Merge the buckets in preference order; the first entry wins.
    ordered_list = []
    ordered_list.extend(eform_net)
    ordered_list.extend(docker_net)
    ordered_list.extend(vbox_net)
    ordered_list.extend(veth_net)
    ordered_list.extend(extras_net)
    ordered_list.extend(mono_net)
    logs.irl.debug(' Final list of possible addresses: %s', ordered_list)
    # note: we could go and ssh over and ping back to check these. For now, just
    # grab the first.
    if len(ordered_list) == 0:
        logs.warning('could not find the test-host ip address and fell back on localhost')
        self.__alleged_testhost_ip = '127.0.1.1'
        return
    picked = ordered_list[0]
    logs.irl.debug('picked %s on %s', picked.ip, picked.on_name)
    self.__alleged_testhost_ip = str(picked.ip)
def deploy_ova(self, vm, uname, passwd, numvms, ovafile):
    """Deploy one OVA instance (index `vm`) to the ESXi hypervisor,
    patch its MAC address in the .vmx file, power it on, wait for it to
    answer ping, and sync its clock to the local host."""
    print '**** Deploying OVA file on hypervisor ' + fit_common.fitargs()['hyper']
    rc = subprocess.call("ovftool --X:injectOvfEnv --overwrite "
                         "--powerOffTarget --skipManifestCheck -q "
                         "--net:'ADMIN'='VM Network' "
                         "--net:'CONTROL'='Control Network' "
                         "--net:'PDU'='PDU Network' "
                         "--name='ora-stack-" + fit_common.fitargs()['stack'] + "-" + str(vm) + "' "
                         "--noSSLVerify " + ovafile +
                         " vi://" + uname + ":" + passwd + "@" +
                         fit_common.fitargs()['hyper'],
                         shell=True)
    # Check for successful completion
    if rc > 0:
        print 'OVA installer failed at host: ' + fit_common.fitargs()['hyper'] + "Exiting..."
        sys.exit(255)
    # Wait for VM to settle
    fit_common.countdown(30)
    # check number of vms for deployment
    if numvms == 1:
        ovamac = fit_common.fitcfg()['ovamac']
    else:
        # compose appropriate MAC address using lab convention
        # (last octet is the zero-padded VM index)
        vmnum = '{0:02}'.format(int(vm))
        macsplit = fit_common.fitcfg()['ovamac'].split(":")
        ovamac = macsplit[0] + ":" + macsplit[1] + ":" + macsplit[2] + ":" + macsplit[3] + ":" + macsplit[4] + ":" + vmnum
    # Install MAC address by editing OVA .vmx file, then startup VM.
    # The triple-backslash escaping survives two quoting layers: this
    # Python string, and the remote shell invoked over ssh.
    esxi_command = "export fullpath=`find vmfs -name ora-stack-" + fit_common.fitargs()['stack'] + "-" + str(vm) + "*.vmx`;" \
                   "for file in $fullpath;" \
                   "do " \
                   "export editline=`cat $file |grep \\\'ethernet0.generatedAddress =\\\'`;" \
                   "export editcmd=\\\'/\\\'$editline\\\'\/ c\\\ethernet0.address = \\\"" + ovamac + "\\\"\\\';" \
                   "sed -i \\\"$editcmd\\\" $file;" \
                   "sed -i \\\'/ethernet0.addressType = \\\"vpx\\\"/ c\\\ethernet0.addressType = \\\"static\\\"\\\' $file;" \
                   "sed -i \\\'/ethernet0.addressType = \\\"generated\\\"/ c\\\ethernet0.addressType = \\\"static\\\"\\\' $file;" \
                   "done;" \
                   "sleep 5;" \
                   "export vmidstring=`vim-cmd vmsvc/getallvms |grep ora-stack-" + fit_common.fitargs()['stack'] + "-" + str(vm) + "`;" \
                   "for vmid in $vmidstring;" \
                   "do " \
                   "vim-cmd vmsvc/power.on $vmid;" \
                   "exit $?;" \
                   "done;"
    (command_output, exitstatus) = \
        fit_common.pexpect.run(
            "ssh -q -o StrictHostKeyChecking=no -t " + uname + "@" +
            fit_common.fitargs()['hyper'] + " " + esxi_command,
            withexitstatus=1,
            events={"assword": passwd + "\n"},
            timeout=20, logfile=sys.stdout)
    if exitstatus > 0:
        print "MAC address processing failed. Exiting..."
        sys.exit(255)
    # Poll the OVA via ping
    for dummy in range(0, 30):
        if vm > 0:
            hostname = "stack" + fit_common.fitargs()['stack'] + "-ora-" + str(vm) + ".admin"
        else:
            hostname = "stack" + fit_common.fitargs()['stack'] + "-ora.admin"
        rc = subprocess.call("ping -c 1 -w 5 " + hostname, shell=True)
        if rc == 0:
            break
        else:
            fit_common.time.sleep(10)
    self.assertEqual(rc, 0, "VM did not activate.")
    # Sync time on ORA
    localdate = fit_common.subprocess.check_output("date +%s", shell=True)
    fit_common.remote_shell("/bin/date -s @" + localdate.replace("\n", "") + ";/sbin/hwclock --systohc")
    return None
def deploy_ova(self, vm, uname, passwd, numvms, ovafile):
    """Deploy one OVA instance (index `vm`) to the ESXi hypervisor,
    patch its MAC address in the .vmx file, power it on, wait for it to
    answer ping, and sync its clock to the local host."""
    print '**** Deploying OVA file on hypervisor ' + fit_common.fitargs(
    )['hyper']
    rc = subprocess.call(
        "ovftool --X:injectOvfEnv --overwrite "
        "--powerOffTarget --skipManifestCheck -q "
        "--net:'ADMIN'='VM Network' "
        "--net:'CONTROL'='Control Network' "
        "--net:'PDU'='PDU Network' "
        "--name='ora-stack-" + fit_common.fitargs()['stack'] + "-" +
        str(vm) + "' "
        "--noSSLVerify " + ovafile + " vi://" + uname + ":" + passwd + "@" +
        fit_common.fitargs()['hyper'],
        shell=True)
    # Check for successful completion
    if rc > 0:
        print 'OVA installer failed at host: ' + fit_common.fitargs(
        )['hyper'] + "Exiting..."
        sys.exit(255)
    # Wait for VM to settle
    fit_common.countdown(30)
    # check number of vms for deployment
    if numvms == 1:
        ovamac = fit_common.fitcfg()['ovamac']
    else:
        # compose appropriate MAC address using lab convention
        # (last octet is the zero-padded VM index)
        vmnum = '{0:02}'.format(int(vm))
        macsplit = fit_common.fitcfg()['ovamac'].split(":")
        ovamac = macsplit[0] + ":" + macsplit[1] + ":" + macsplit[
            2] + ":" + macsplit[3] + ":" + macsplit[4] + ":" + vmnum
    # Install MAC address by editing OVA .vmx file, then startup VM.
    # The triple-backslash escaping survives two quoting layers: this
    # Python string, and the remote shell invoked over ssh.
    esxi_command = "export fullpath=`find vmfs -name ora-stack-" + fit_common.fitargs()['stack'] + "-" + str(vm) + "*.vmx`;" \
                   "for file in $fullpath;" \
                   "do " \
                   "export editline=`cat $file |grep \\\'ethernet0.generatedAddress =\\\'`;" \
                   "export editcmd=\\\'/\\\'$editline\\\'\/ c\\\ethernet0.address = \\\"" + ovamac + "\\\"\\\';" \
                   "sed -i \\\"$editcmd\\\" $file;" \
                   "sed -i \\\'/ethernet0.addressType = \\\"vpx\\\"/ c\\\ethernet0.addressType = \\\"static\\\"\\\' $file;" \
                   "sed -i \\\'/ethernet0.addressType = \\\"generated\\\"/ c\\\ethernet0.addressType = \\\"static\\\"\\\' $file;" \
                   "done;" \
                   "sleep 5;" \
                   "export vmidstring=`vim-cmd vmsvc/getallvms |grep ora-stack-" + fit_common.fitargs()['stack'] + "-" + str(vm) + "`;" \
                   "for vmid in $vmidstring;" \
                   "do " \
                   "vim-cmd vmsvc/power.on $vmid;" \
                   "exit $?;" \
                   "done;"
    (command_output, exitstatus) = \
        fit_common.pexpect.run(
            "ssh -q -o StrictHostKeyChecking=no -t " + uname + "@" +
            fit_common.fitargs()['hyper'] + " " + esxi_command,
            withexitstatus=1,
            events={"assword": passwd + "\n"},
            timeout=20, logfile=sys.stdout)
    if exitstatus > 0:
        print "MAC address processing failed. Exiting..."
        sys.exit(255)
    # Poll the OVA via ping
    for dummy in range(0, 30):
        if vm > 0:
            hostname = "stack" + fit_common.fitargs(
            )['stack'] + "-ora-" + str(vm) + ".admin"
        else:
            hostname = "stack" + fit_common.fitargs(
            )['stack'] + "-ora.admin"
        rc = subprocess.call("ping -c 1 -w 5 " + hostname, shell=True)
        if rc == 0:
            break
        else:
            fit_common.time.sleep(10)
    self.assertEqual(rc, 0, "VM did not activate.")
    # Sync time on ORA
    localdate = fit_common.subprocess.check_output("date +%s", shell=True)
    fit_common.remote_shell("/bin/date -s @" + localdate.replace("\n", "") +
                            ";/sbin/hwclock --systohc")
    return None
def setUp(self):
    """Record the VM count for this run; all but one VM host services."""
    self.number_of_vms = int(fit_common.fitargs()['numvms'])
    self.number_of_services = self.number_of_vms - 1  # number of mongo and rabbitmq resource
def setUpClass(self):
    """Seed the local Mongo 'pxe' database with a fake node, OBM entry,
    and catalogs, and build the alert payload used by the tests.

    Only runs in a vagrant environment; otherwise returns immediately.
    The pre-test collection counts are recorded so teardown/validation
    can detect what the tests added.
    """
    # Pick the IP the guest uses to reach the host, by vagrant flavor.
    if fit_common.fitargs()['stack'] == 'vagrant_guest':
        IP = "127.0.0.1"
    elif fit_common.fitargs()['stack'] in ['vagrant', 'vagrant_ucs']:
        IP = "10.0.2.2"
    else:
        logs.info(" Not running in a vagrant environment, skipping tests")
        return
    logs.debug_1("MONGO_PORT: {0}, AMQP_PORT: {1}, HTTP_PORT:{2}".format(
        MONGO_PORT, AMQP_PORT, HTTP_PORT))
    # FIX: the CLIENT/db/node assignments were duplicated verbatim in the
    # original; the redundant second copy has been removed.
    self.CLIENT = MongoClient('localhost', MONGO_PORT)
    self.db = self.CLIENT.pxe
    self.node = {
        "identifiers": ["FF:FF:FF:FF:FF:FF"],
        "name": "FF:FF:FF:FF:FF:FF",
        "relations": [],
        "tags": [],
        "type": "compute"
    }
    # Snapshot collection sizes before inserting anything.
    self.NODES_COUNT = self.db.nodes.count()
    self.OBMS_COUNT = self.db.obms.count()
    self.CATALOGS_COUNT = self.db.catalogs.count()
    logs.debug_3("Counts at first: Node {0} Catalogs {1} Obms {2}"
                 .format(self.NODES_COUNT, self.CATALOGS_COUNT, self.OBMS_COUNT))
    logs.debug_3('Number of nodes before the test = {0}'.format(self.NODES_COUNT))
    logs.debug_3('Number of obms before the test = {0}'.format(self.OBMS_COUNT))
    logs.debug_3('Number of catalogs before the test = {0}'.format(self.CATALOGS_COUNT))
    self.nodeID = self.db.nodes.insert(self.node)
    # OBM entry pointing the fake node's IPMI service at the chosen IP.
    self.obm = {
        "config": {
            "host": IP,
            "user": "******",
            "password": "******"
        },
        "node": ObjectId(self.nodeID),
        "service": "ipmi-obm-service"
    }
    # Minimal BMC catalog so alert correlation by IP/MAC can succeed.
    self.bmcCatalog = {
        "node": ObjectId(self.nodeID),
        "source": "bmc",
        "data": {
            "Set in Progress": "Set Complete",
            "Auth Type Support": "MD5",
            "Auth Type Enable": {
                "Callback": "MD5 ",
                "User": "******",
                "Operator": "MD5 ",
                "Admin": "MD5 ",
                "OEM": ""
            },
            "IP Address Source": "DHCP Address",
            "IP Address": IP,
            "Subnet Mask": "255.255.255.0",
            "MAC Address": "64:00:6a:c3:52:32"
        }
    }
    self.ipmifruCatalog = {
        "node": ObjectId(self.nodeID),
        "source": "ipmi-fru",
        "data": {
            "Builtin FRU Device (ID 0)": {
                "Board Product": None,
                "Product Serial": None,
                "Board Serial": None
            }
        }
    }
    # Redfish-style alert body posted by the tests.
    self.payload = {
        "Context": "context string",
        "EventId": "8689",
        "EventTimestamp": "2017-04-05T10:31:29-0500",
        "EventType": "Alert",
        "MemberId": "7e675c8e-127a-11e7-9fc8-64006ac35232",
        "Message": "The coin cell battery in CMC 1 is not working.",
        "MessageArgs": ["1"],
        "*****@*****.**": 1,
        "MessageId": "CMC8572",
        "Severity": "critical"
    }
    # Dump current collection contents for debugging.
    cursor = self.db.nodes.find()
    for document in cursor:
        logs.info("Node collection: {0}".format(document))
    cursor = self.db.obms.find({})
    for document in cursor:
        logs.debug_3("OBM collection: {0}".format(document))
    cursor = self.db.catalogs.find({})
    for document in cursor:
        logs.debug_3("Catalogs collection: : {0}".format(document))
def setUp(self):
    """Record the VM count for this run; all but one VM host services."""
    self.number_of_vms = int(fit_common.fitargs()['numvms'])
    self.number_of_services = self.number_of_vms - 1  # number of mongo and rabbitmq resource
def setUpClass(self):
    """Seed the local Mongo 'pxe' database with a fake node, OBM entry,
    and catalogs, and build the alert payload used by the tests.

    Only runs in a vagrant environment; otherwise returns immediately.
    """
    # Pick the IP the guest uses to reach the host, by vagrant flavor.
    if fit_common.fitargs()['stack'] == 'vagrant_guest':
        IP = "127.0.0.1"
    elif fit_common.fitargs()['stack'] in ['vagrant', 'vagrant_ucs']:
        IP = "10.0.2.2"
    else:
        logs.info(" Not running in a vagrant environment, skipping tests")
        return
    logs.debug_1("MONGO_PORT: {0}, AMQP_PORT: {1}, HTTP_PORT:{2}".format(
        MONGO_PORT, AMQP_PORT, HTTP_PORT))
    self.CLIENT = MongoClient('localhost', MONGO_PORT)
    self.db = self.CLIENT.pxe
    self.node = {
        "identifiers": ["FF:FF:FF:FF:FF:FF"],
        "name": "FF:FF:FF:FF:FF:FF",
        "relations": [],
        "tags": [],
        "type": "compute"
    }
    # NOTE(review): the three assignments below duplicate the three above
    # verbatim — harmless but almost certainly unintended; candidate for
    # removal.
    self.CLIENT = MongoClient('localhost', MONGO_PORT)
    self.db = self.CLIENT.pxe
    self.node = {
        "identifiers": ["FF:FF:FF:FF:FF:FF"],
        "name": "FF:FF:FF:FF:FF:FF",
        "relations": [],
        "tags": [],
        "type": "compute"
    }
    # Snapshot collection sizes before inserting anything.
    self.NODES_COUNT = self.db.nodes.count()
    self.OBMS_COUNT = self.db.obms.count()
    self.CATALOGS_COUNT = self.db.catalogs.count()
    logs.debug_3(
        "Counts at first: Node {0} Catalogs {1} Obms {2}".format(
            self.NODES_COUNT, self.CATALOGS_COUNT, self.OBMS_COUNT))
    logs.debug_3('Number of nodes before the test = {0}'.format(
        self.NODES_COUNT))
    logs.debug_3('Number of obms before the test = {0}'.format(
        self.OBMS_COUNT))
    logs.debug_3('Number of catalogs before the test = {0}'.format(
        self.CATALOGS_COUNT))
    self.nodeID = self.db.nodes.insert(self.node)
    # OBM entry pointing the fake node's IPMI service at the chosen IP.
    self.obm = {
        "config": {
            "host": IP,
            "user": "******",
            "password": "******"
        },
        "node": ObjectId(self.nodeID),
        "service": "ipmi-obm-service"
    }
    # Minimal BMC catalog so alert correlation by IP/MAC can succeed.
    self.bmcCatalog = {
        "node": ObjectId(self.nodeID),
        "source": "bmc",
        "data": {
            "Set in Progress": "Set Complete",
            "Auth Type Support": "MD5",
            "Auth Type Enable": {
                "Callback": "MD5 ",
                "User": "******",
                "Operator": "MD5 ",
                "Admin": "MD5 ",
                "OEM": ""
            },
            "IP Address Source": "DHCP Address",
            "IP Address": IP,
            "Subnet Mask": "255.255.255.0",
            "MAC Address": "64:00:6a:c3:52:32"
        }
    }
    self.ipmifruCatalog = {
        "node": ObjectId(self.nodeID),
        "source": "ipmi-fru",
        "data": {
            "Builtin FRU Device (ID 0)": {
                "Board Product": None,
                "Product Serial": None,
                "Board Serial": None
            }
        }
    }
    # Redfish-style alert body posted by the tests.
    self.payload = {
        "Context": "context string",
        "EventId": "8689",
        "EventTimestamp": "2017-04-05T10:31:29-0500",
        "EventType": "Alert",
        "MemberId": "7e675c8e-127a-11e7-9fc8-64006ac35232",
        "Message": "The coin cell battery in CMC 1 is not working.",
        "MessageArgs": ["1"],
        "*****@*****.**": 1,
        "MessageId": "CMC8572",
        "Severity": "critical"
    }
    # Dump current collection contents for debugging.
    cursor = self.db.nodes.find()
    for document in cursor:
        logs.info("Node collection: {0}".format(document))
    cursor = self.db.obms.find({})
    for document in cursor:
        logs.debug_3("OBM collection: {0}".format(document))
    cursor = self.db.catalogs.find({})
    for document in cursor:
        logs.debug_3("Catalogs collection: : {0}".format(document))
def test04_rackhd_smoke_test(self):
    """Run the nose 'smoke' test group against the deployed stack."""
    # Narrow the run to the smoke group before invoking nose.
    fit_common.fitargs()['group'] = "smoke"
    result = fit_common.run_nose(fit_common.TEST_PATH + '/tests')
    self.assertEqual(result, 0, 'RackHD Smoke Test failed.')
import Queue import flogging import logging import pika import unittest import threading import fit_common from nose.plugins.attrib import attr logs = flogging.get_loggers() amqp_queue = Queue.Queue(maxsize=0) from pymongo import MongoClient from bson.objectid import ObjectId import json # Check the running test environment if fit_common.fitargs()['stack'] in [ 'vagrant_guest', 'vagrant', 'vagrant_ucs' ]: env_vagrant = True else: env_vagrant = False AMQP_PORT = fit_common.fitports()['amqp_ssl'] MONGO_PORT = fit_common.fitports()['mongo_port'] HTTP_PORT = fit_common.fitports()['http'] class AmqpWorker(threading.Thread): def __init__(self, exchange_name, topic_routing_key, external_callback,
import Queue import flogging import logging import pika import unittest import threading import fit_common from nose.plugins.attrib import attr logs = flogging.get_loggers() amqp_queue = Queue.Queue(maxsize=0) from pymongo import MongoClient from bson.objectid import ObjectId import json # Check the running test environment if fit_common.fitargs()['stack'] in ['vagrant_guest', 'vagrant', 'vagrant_ucs']: env_vagrant = True else: env_vagrant = False AMQP_PORT = fit_common.fitports()['amqp_ssl'] MONGO_PORT = fit_common.fitports()['mongo_port'] HTTP_PORT = fit_common.fitports()['http'] class AmqpWorker(threading.Thread): def __init__(self, exchange_name, topic_routing_key, external_callback, timeout=10): threading.Thread.__init__(self) pika_logger = logging.getLogger('pika') if fit_common.VERBOSITY >= 8: pika_logger.setLevel(logging.DEBUG) elif fit_common.VERBOSITY >= 4:
- colocate mongo and rabbitmq resource with virtual ip - create mongo replica set - create rabbitmq mirrored queue policies usage: python run_tests.py -stack <stack ID> -test deploy/rackhd_ha_resource_install.py -numvms <num> """ from jinja2 import Environment, FileSystemLoader import os import time import unittest import fit_path # NOQA: unused import import fit_common nodelist = [] # list of active nodes in cluster numvms = int(fit_common.fitargs()['numvms']) err = [] numrs = numvms - 1 # number of mongo resource vip_dict = {'mongo': [], 'rabbit': [], 'rackhd': []} class rackhd_ha_resource_install(unittest.TestCase): longMessage = True def setUp(self): # collect active nodes in cluster for vmnum in range(1, numvms + 1): command = "crm_mon -X | grep 'node{}.*online=.true' -q".format( vmnum) status = fit_common.remote_shell(command, vmnum=vmnum)['exitcode'] if status != 0:
- sets the cluster properties usage: python run_tests.py -stack <stack ID> -test deploy/rackhd_ha_install.py -numvms <num> """ from jinja2 import Environment, FileSystemLoader import fit_path # NOQA: unused import import os import subprocess import unittest import time from nosedep import depends import fit_common ifslist = [] # array of valid eth ports numvms = int(fit_common.fitargs()['numvms']) class rackhd_ha_install(unittest.TestCase): def test01_install_network_config(self): for vmnum in range(1, numvms + 1): # collect nic names getifs = fit_common.remote_shell("ifconfig -s -a |tail -n +2 |grep -v -e Iface -e lo -e docker", vmnum=vmnum) # clean out login stuff splitifs = getifs['stdout'].split('\n') for item in splitifs: if "assword" not in item and item.split(" ")[0]: ifslist.append(item.split(" ")[0])
def test04_rackhd_smoke_test(self):
    """Execute the nose 'smoke' test group; fail if any smoke test fails."""
    # Select the 'smoke' group, then hand the tests directory to run_nose.
    fit_common.fitargs()['group'] = "smoke"
    result = fit_common.run_nose(fit_common.TEST_PATH + '/tests')
    self.assertEqual(result, 0, 'RackHD Smoke Test failed.')
def __init__(self):
    """Determine the test host's IP address as seen on the network.

    First asks the OS which local address routes toward the configured
    rackhd_host.  If that address is a loopback (i.e. we appear to be on
    the DUT itself or on a test host), falls back to scanning all network
    interfaces and picking the most plausible non-loopback address.  The
    result is stored in self.__alleged_testhost_ip (a string), defaulting
    to '127.0.1.1' if nothing usable is found.
    """
    self.__alleged_testhost_ip = None
    # A UDP socket is used purely for route discovery: connect() on a
    # datagram socket sends no packets but makes the kernel pick a source
    # address, which getsockname() then reveals.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ip = fit_common.fitargs()['rackhd_host']
    monip = fit_common.fitcfg()["rackhd-config"]["apiServerAddress"]
    monip_obj = ipaddress.ip_address(monip)
    logs.irl.debug(
        'Trying to determine testhost IP address. Hitting rackhd_host value %s first',
        ip)
    s.connect((ip, 0))
    logs.debug(' ip used to generate connection to %s was %s: ', ip,
               s.getsockname()[0])
    alleged_testhost_ip_str = s.getsockname()[0]
    # python2/3 flake handling. The 'unicode' keyword is gone from p3. However, although
    # our code is p2, hound uses p3. We can cover both by using the -type- of a unicode string!
    ucode_type = type(u'unicode_string_to_type')
    alleged_testhost_ip = ipaddress.ip_address(
        ucode_type(alleged_testhost_ip_str))
    if not alleged_testhost_ip.is_loopback:
        # A non-loopback address is about the best guess we can get. Use it.
        logs.irl.debug(
            ' ip used to generate connection to %s is non-loopback. Using %s',
            ip, alleged_testhost_ip_str)
        self.__alleged_testhost_ip = alleged_testhost_ip_str
        return
    # Localhost. Great. We are either running on the DUT or are on a test-host.
    # In either case, grabbing pretty much any ip interface that isn't a loop back
    # should do the trick.
    # Buckets for candidate addresses, in descending order of preference
    # (see ordered_list assembly below).
    docker_net = []
    mono_net = []
    eform_net = []
    vbox_net = []
    veth_net = []
    extras_net = []
    int_list = netifaces.interfaces()
    for interface in int_list:
        logs.irl.debug(' checking interface %s', interface)
        ifaddrs = netifaces.ifaddresses(interface)
        if netifaces.AF_INET not in ifaddrs:
            # Interface has no IPv4 addresses; nothing to classify.
            logs.irl.debug(' -- no ifaddrs on it, skipping')
        else:
            for net in ifaddrs[netifaces.AF_INET]:
                logs.irl.debug(' checking %s on %s', net, interface)
                addr = net['addr']
                mask = net['netmask']
                inet_form = u'{}/{}'.format(addr, mask)
                this_iface = ipaddress.ip_interface(inet_form)
                # Remember which OS interface this address came from, for
                # logging once a candidate is picked.
                # NOTE(review): attaching an ad-hoc attribute to an
                # ip_interface object works here but is fragile if the
                # ipaddress implementation ever adds __slots__ — confirm.
                this_iface.on_name = interface
                # 'dispo' records the classification decision for the debug log.
                dispo = None
                if this_iface.is_loopback:
                    dispo = 'loopback-skip'
                elif monip_obj in this_iface.network:
                    # really the last choice, all things considered!
                    dispo = 'added to control-network-list'
                    mono_net.append(this_iface)
                elif 'docker' in interface:
                    dispo = 'added to docker list'
                    docker_net.append(this_iface)
                elif interface.startswith('vbox'):
                    dispo = 'added to vbox list'
                    vbox_net.append(this_iface)
                elif interface.startswith('veth'):
                    dispo = 'added to veth list'
                    veth_net.append(this_iface)
                elif interface.startswith('eth') or interface.startswith(
                        'en'):
                    dispo = 'added to en/eth list'
                    eform_net.append(this_iface)
                else:
                    logs.irl.debug('unknown interface type-ish %s seen',
                                   interface)
                    dispo = 'added to extras list'
                    extras_net.append(this_iface)
                logs.irl.debug(' -> %s', dispo)
    # Assemble candidates in preference order: physical en/eth first, then
    # docker/vbox/veth virtual interfaces, unknowns, and finally addresses
    # on the RackHD control network (least preferred).
    ordered_list = []
    ordered_list.extend(eform_net)
    ordered_list.extend(docker_net)
    ordered_list.extend(vbox_net)
    ordered_list.extend(veth_net)
    ordered_list.extend(extras_net)
    ordered_list.extend(mono_net)
    logs.irl.debug(' Final list of possible addresses: %s', ordered_list)
    # note: we could go and ssh over and ping back to check these. For now, just
    # grab the first.
    if len(ordered_list) == 0:
        # No candidates at all: fall back to the conventional local address.
        logs.warning(
            'could not find the test-host ip address and fell back on localhost'
        )
        self.__alleged_testhost_ip = '127.0.1.1'
        return
    picked = ordered_list[0]
    logs.irl.debug('picked %s on %s', picked.ip, picked.on_name)
    self.__alleged_testhost_ip = str(picked.ip)