def test041_check_rabbit_address_and_ip(self):
    """Verify every rabbitmq virtual-IP pacemaker resource exists in the
    crm config and is bound to the expected southbound IP address.

    Accumulates failures across all services so one assertion reports
    every missing resource/IP at once.
    """
    err_addr = err_ip = ''
    # configure virtual ip for rabbitmq resource
    sb_net = self.get_southbound_network()
    self.assertIsNotNone(sb_net, 'Southbound network not found')
    for rabbit_number in range(1, self.number_of_services + 1):
        # check for name
        command = "crm config show | grep 'primitive.*rabbit_addr_{0}' -q" \
            .format(rabbit_number)
        status = fit_common.remote_shell(
            command, vmnum=active_node_list[0])['exitcode']
        if status != 0:
            # BUG FIX: was "', '.format(rabbit_number)" — the placeholder
            # was missing, so the failure message never named the resource
            err_addr += 'rabbit_addr_{}, '.format(rabbit_number)
        else:
            # now check the IP address
            command = (
                "crm config show | grep -A3 'primitive.*rabbit_addr_{0}'" +
                " | grep 'ip={1}.{2}{0}' -q").format(
                    rabbit_number, sb_net, RABBIT_IP_PREFIX)
            status = fit_common.remote_shell(
                command, vmnum=active_node_list[0])['exitcode']
            if status != 0:
                err_ip += '{1}.{2}{0} for rabbit_addr_{0}, '.format(
                    rabbit_number, sb_net, RABBIT_IP_PREFIX)
    self.assertEqual(
        len(err_addr), 0,
        'The following rabbit addresses are missing: {}'.format(
            err_addr.rstrip(", ")))
    self.assertEqual(
        len(err_ip), 0,
        'The following rabbit IP addresses are missing or incorrect: {}'.
        format(err_ip.rstrip(", ")))
def test01_install_mongo_resource(self):
    """Create dockerized mongo pacemaker resources and their virtual IPs
    on the first active node, then initialize the mongo replica set.
    """
    # create resource on first active node
    vmnum = nodelist[0]
    sb_net = self.get_southbound_network()
    self.assertIsNotNone(sb_net, "Could not find southbound address")
    for mongo in range(1, numrs + 1):
        # start mongo container as pacemaker resource
        rsc = 'docker_mongo_{}'.format(mongo)
        # the \\\' sequences pass literal quoted option strings through the
        # remote shell to crm
        command = ("crm configure primitive {} ocf:heartbeat:docker " +
                   "params allow_pull=true image='registry.hwimo.lab.emc.com/mongo' " +
                   "run_opts=\\\'--privileged=true --net='host' -d -p 27017:27017\\\' " +
                   "run_cmd=\\\'--replSet mongo_rs --logpath /var/log/mongodb/mongod.log\\\' " +
                   "meta is-managed='true'").format(rsc)
        self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
                         0, "{} resource failure.".format(rsc))
        # create mongo virtual ip resource
        ip = '{}.12{}'.format(sb_net, mongo)
        vip_dict['mongo'].append(ip)
        rsc_ip = 'mongo_addr_{}'.format(mongo)
        self.assertTrue(self.configure_virtual_ip_resource(vmnum, ip, rsc_ip),
                        "{} resource failure.".format(rsc_ip))
        # colocate mongo and virtual IPs
        mongo_cls = 'mongo{}'.format(mongo)
        command = "crm configure colocation {} inf: {} {}".format(mongo_cls, rsc, rsc_ip)
        self.assertEqual(fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
                         0, "colocation failure")
    # create mongo replica config
    mongo_rep = open('mongo_replica_init.bash', 'w')
    mongo_rep.write(self.create_mongo_config(vip_dict['mongo']))
    mongo_rep.close()
    # copy file to ora
    fit_common.scp_file_to_host('mongo_replica_init.bash', vmnum)
    os.remove('mongo_replica_init.bash')
    # NOTE(review): chmod exitcode is fetched but not asserted — best-effort
    fit_common.remote_shell("chmod 777 mongo_replica_init.bash",
                            vmnum=vmnum)['exitcode']
    # run script to initiate replica set
    self.assertEqual(fit_common.remote_shell("./mongo_replica_init.bash",
                                             vmnum=vmnum)['exitcode'],
                     0, "Mongo replica initiation failure")
def test03_clone_rackhd_source(self): print "**** Cloning RackHD source." modules = [ "on-core", "on-dhcp-proxy", "on-http", "on-statsd", "on-syslog", "on-taskgraph", "on-tasks", "on-tftp", "on-tools" ] # clone base repo fit_common.remote_shell('rm -rf ~/rackhd') self.assertEqual(fit_common.remote_shell(ENVVARS + "git clone " + fit_common.GLOBAL_CONFIG['repos']['install']['rackhd'] + "/rackhd ~/rackhd" )['exitcode'], 0, "RackHD git clone failure.") # clone modules for repo in modules: self.assertEqual(fit_common.remote_shell(ENVVARS + "rm -rf ~rackhd/" + repo + ";" + "git clone " + fit_common.GLOBAL_CONFIG['repos']['install']['rackhd'] + "/" + repo + " ~/rackhd/" + repo )['exitcode'], 0, "RackHD git clone module failure:" + repo)
def test06_install_network_config(self): print "**** Installing RackHD network config." # install control network config self.assertEqual( fit_common.remote_shell( "echo 'auto " + IFLIST[7] + "' > /etc/network/interfaces.d/control.cfg;" "echo 'iface " + IFLIST[7] + " inet static' >> /etc/network/interfaces.d/control.cfg;" "echo 'address 172.31.128.1' >> /etc/network/interfaces.d/control.cfg;" "echo 'netmask 255.255.252.0' >> /etc/network/interfaces.d/control.cfg" )['exitcode'], 0, "Network config failure.") # If PDU network is present, configure try: IFLIST[8] except IndexError: print "**** No PDU network will be configured" else: self.assertEqual( fit_common.remote_shell( "echo 'auto " + IFLIST[8] + "' > /etc/network/interfaces.d/pdudirect.cfg;" "echo 'iface " + IFLIST[8] + " inet static' >> /etc/network/interfaces.d/pdudirect.cfg;" "echo 'address 192.168.1.1' >> /etc/network/interfaces.d/pdudirect.cfg;" "echo 'netmask 255.255.255.0' >> /etc/network/interfaces.d/pdudirect.cfg" )['exitcode'], 0, "Network config failure.")
def test05_install_rackhd_config_files(self): print "**** Installing RackHD config files." # create RackHD config hdconfig = fit_common.fitcfg()['rackhd-config'] config_json = open('config.json', 'w') config_json.write( fit_common.json.dumps(hdconfig, sort_keys=True, indent=4)) config_json.close() # AMQP config files rabbitmq_config = open('rabbitmq.config', 'w') rabbitmq_config.write( '[{rabbit,[{tcp_listeners, [5672]},{loopback_users, []}]},{rabbitmq_management,[{listener, [{port, 15672},{ip,"127.0.0.1"}]}]}].' ) rabbitmq_config.close() # copy files to ORA fit_common.scp_file_to_ora('config.json') fit_common.scp_file_to_ora('rabbitmq.config') self.assertEqual( fit_common.remote_shell('cp config.json /opt/monorail/') ['exitcode'], 0, "RackHD Config file failure.") self.assertEqual( fit_common.remote_shell('cp rabbitmq.config /etc/rabbitmq/') ['exitcode'], 0, "AMQP Config file failure.") os.remove('config.json') os.remove('rabbitmq.config') self.assertEqual( fit_common.remote_shell(PROXYVARS + "cd ~/src/on-http && ./install-web-ui.sh") ['exitcode'], 0, "web-ui install failure.") self.assertEqual( fit_common.remote_shell( PROXYVARS + "cd ~/src/on-http && ./install-swagger-ui.sh")['exitcode'], 0, "swagger-ui install failure.")
def test_full_sel(self):
    """Fill the BMC SEL and verify matching AMQP sel-update alerts arrive.

    Skipped on baremetal stacks: filling a real BMC's SEL is not a test
    of the software under test.
    """
    # skip filling the BMC SEL log on real hardware as don't need to test how the HW is working
    if "stackType" in fit_common.fitcfg() and \
            fit_common.fitcfg()['stackType'] == "baremetal":
        logs.info(" *** Skipping test_full_sel on baremetal stack")
        return
    # listen to AMQP
    bmc_ip = self.__get_run_context('bmc_ip')
    node_id = self.__get_run_context('node_id')
    poller_id = self.__get_run_context('poller_id')
    available_sel_entries = self.__get_run_context('available_sel_entries')
    route = 'polleralert.sel.updated.#.{}.{}'.format(poller_id, node_id)
    self.__qproc.match_on_routekey('polleralert-sel-update',
                                   min=available_sel_entries - 3,
                                   max=available_sel_entries + 3,
                                   routing_key=route)
    # start from an empty SEL, then replay generated entries into the BMC
    self.__run_ipmitool_command(bmc_ip, "sel clear")
    self.__verify_empty_sel(bmc_ip)
    sel_file = self.__create_selEntries_file(available_sel_entries)
    fit_common.remote_shell('ls')
    sel_file_path = fit_common.scp_file_to_host(sel_file)
    self.__run_ipmitool_command(bmc_ip, "sel add {0}".format(sel_file_path))
    # wait for the results
    results = self._amqp_sp.finish(timeout=360)
    results[0].assert_errors(self)
def test00_update_config(self): # this will add proxy settings to default OnRack Config file monorail_config = fit_common.rackhdapi('/api/2.0/config')['json'] monorail_config.update({ "httpProxies": [{ "localPath": "/mirror", "remotePath": "/", "server": fit_common.GLOBAL_CONFIG['repos']['mirror'] }] }) monorail_json = open('monorail.json', 'w') monorail_json.write( fit_common.json.dumps(monorail_config, sort_keys=True, indent=4)) monorail_json.close() fit_common.scp_file_to_ora('monorail.json') self.assertEqual( fit_common.remote_shell('cp monorail.json /opt/onrack/etc/') ['exitcode'], 0, "RackHD Config file failure.") os.remove('monorail.json') print "**** Restart services..." fit_common.remote_shell("/opt/onrack/bin/monorail restart") fit_common.countdown(30) self.assertEqual( fit_common.rackhdapi("/api/2.0/config")['status'], 200, "Unable to contact Onrack.")
def test07_reboot_and_check(self): print "**** Reboot and check installation." # create startup files self.assertEqual( fit_common.remote_shell( "touch /etc/default/on-dhcp-proxy /etc/default/on-http /etc/default/on-tftp /etc/default/on-syslog /etc/default/on-taskgraph" )['exitcode'], 0, "Install failure.") # reboot print "**** Rebooting appliance..." fit_common.remote_shell("shutdown -r now") print "**** Waiting for login..." fit_common.countdown(30) shell_data = 0 for dummy in range(0, 30): shell_data = fit_common.remote_shell("pwd") if shell_data['exitcode'] == 0: break else: fit_common.time.sleep(5) self.assertEqual(shell_data['exitcode'], 0, "Shell test failed after appliance reboot") fit_common.time.sleep(10) self.assertEqual( fit_common.rackhdapi("/api/2.0/config")['status'], 200, "Unable to contact RackHD.")
def test02_install_dhcp_config(self):
    """Push ISC DHCP server defaults and dhcpd.conf to every VM."""
    # config text is identical for every VM, so build it once
    dhcpd_text = (
        'ddns-update-style none;\n'
        'option domain-name "example.org";\n'
        'option domain-name-servers ns1.example.org, ns2.example.org;\n'
        'default-lease-time 600;\n'
        'max-lease-time 7200;\n'
        'log-facility local7;\n'
        'deny duplicates;\n'
        'ignore-client-uids true;\n'
        'subnet 172.31.128.0 netmask 255.255.252.0 {\n'
        ' range 172.31.128.100 172.31.131.254;\n'
        ' option vendor-class-identifier "PXEClient";\n'
        '}\n')
    for vmnum in range(1, numvms + 1):
        fit_common.remote_shell('echo INTERFACES=' + ifslist[1] +
                                ' > /etc/default/isc-dhcp-server',
                                vmnum=vmnum)
        with open('dhcpd.conf', 'w') as dhcp_conf:
            dhcp_conf.write(dhcpd_text)
        # copy file to ORA
        fit_common.scp_file_to_host('dhcpd.conf', vmnum)
        os.remove('dhcpd.conf')
        self.assertEqual(
            fit_common.remote_shell('cp dhcpd.conf /etc/dhcp/',
                                    vmnum=vmnum)['exitcode'],
            0, "DHCP Config failure.")
def test_full_sel(self):
    """Fill the BMC SEL and confirm the matching AMQP sel-update alerts."""
    # listen to AMQP
    bmc_ip = self.__get_run_context('bmc_ip')
    node_id = self.__get_run_context('node_id')
    poller_id = self.__get_run_context('poller_id')
    available_sel_entries = self.__get_run_context('available_sel_entries')
    routing = 'polleralert.sel.updated.#.{}.{}'.format(poller_id, node_id)
    self.__qproc.match_on_routekey('polleralert-sel-update',
                                   min=available_sel_entries - 3,
                                   max=available_sel_entries,
                                   routing_key=routing)
    # start from an empty SEL
    self.__run_ipmitool_command(bmc_ip, "sel clear")
    self.__verify_empty_sel(bmc_ip)
    # generate the SEL entries, upload them, and replay into the BMC
    sel_file = self.__create_selEntries_file(available_sel_entries)
    fit_common.remote_shell('ls')
    sel_file_path = fit_common.scp_file_to_host(sel_file)
    self.__run_ipmitool_command(bmc_ip, "sel add {0}".format(sel_file_path))
    # wait for the results
    results = self._amqp_sp.finish(timeout=360)
    results[0].assert_errors(self)
def test03_install_hosts_config(self):
    """Rebuild /etc/hosts on every VM with fresh southbound entries,
    keeping the operation idempotent across reruns."""
    sb_net = self.get_southbound_network()
    self.assertIsNotNone(sb_net, "Could not find southbound address")
    for vmnum in range(1, numvms + 1):
        # regenerate hosts-conf for this VM
        self.create_hosts()
        # copy file to ORA
        fit_common.scp_file_to_host('hosts-conf', vmnum)
        # Clean out the previous entries to be idempotent
        command = "grep -v {} /etc/hosts > hosts".format(sb_net)
        self.assertEqual(
            fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
            0, "Hosts Config failure; Cleaning out previous entries")
        # Replace the local hostname
        command = "sed -i 's/ora/node{}/' hosts".format(vmnum)
        self.assertEqual(
            fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
            0, "Hosts Config failure; Replacing local hostname")
        # Add the new entries
        self.assertEqual(
            fit_common.remote_shell('cat hosts-conf >> hosts',
                                    vmnum=vmnum)['exitcode'],
            0, "Hosts Config failure; Adding new entries")
        # Move the new file into place
        self.assertEqual(
            fit_common.remote_shell('mv hosts /etc/hosts',
                                    vmnum=vmnum)['exitcode'],
            0, "Hosts Config failure; Moving new file into place")
        # NOTE(review): assumed inside the loop since create_hosts()
        # regenerates hosts-conf each iteration — confirm placement
        os.remove('hosts-conf')
def test_full_sel(self):
    """Fill the BMC SEL and verify sel-update AMQP alerts are raised."""
    # gather the run context values this test needs
    ctx = {key: self.__get_run_context(key)
           for key in ('bmc_ip', 'node_id', 'poller_id',
                       'available_sel_entries')}
    expected = ctx['available_sel_entries']
    # listen to AMQP before triggering any SEL activity
    self.__qproc.match_on_routekey(
        'polleralert-sel-update',
        min=expected - 3,
        max=expected,
        routing_key='polleralert.sel.updated.#.{}.{}'.format(
            ctx['poller_id'], ctx['node_id']))
    self.__run_ipmitool_command(ctx['bmc_ip'], "sel clear")
    self.__verify_empty_sel(ctx['bmc_ip'])
    sel_file = self.__create_selEntries_file(expected)
    fit_common.remote_shell('ls')
    sel_file_path = fit_common.scp_file_to_host(sel_file)
    self.__run_ipmitool_command(ctx['bmc_ip'],
                                "sel add {0}".format(sel_file_path))
    # wait for the results
    results = self._amqp_sp.finish(timeout=360)
    results[0].assert_errors(self)
def test01_install_ova_template(self):
    """Validate tooling, connectivity, the OVA file, and hypervisor
    credentials, then deploy the OVA once per requested VM."""
    ovafile = fit_common.ARGS_LIST['template']
    numvms = int(fit_common.ARGS_LIST['numvms'])
    # Check for ovftool
    self.assertEqual(
        fit_common.subprocess.call('which ovftool', shell=True), 0,
        "FAILURE: 'ovftool' not installed.")
    # Ping for valid ESXi host
    self.assertEqual(
        fit_common.subprocess.call('ping -c 1 ' + fit_common.ARGS_LIST['hyper'],
                                   shell=True), 0,
        "FAILURE: ESXi hypervisor not found.")
    # Run probe to check for valid OVA file
    rc = fit_common.subprocess.call("ovftool " + ovafile, shell=True)
    self.assertEqual(rc, 0, 'Invalid or missing OVA file: ' + ovafile)
    # check for number of virtual machine
    self.assertTrue(numvms < 100, "Number of vms should not be greater than 99")
    # check stack ID as number to generate MAC address for multiple OVA
    if numvms > 1:
        self.assertTrue(
            fit_common.ARGS_LIST['stack'].isdigit(),
            "Stack ID must be a number if numvms is greater than 1")
    # Shutdown previous ORA
    if fit_common.subprocess.call('ping -c 1 ' + fit_common.ARGS_LIST['ora'],
                                  shell=True) == 0:
        fit_common.remote_shell('shutdown -h now')
        fit_common.time.sleep(5)
    # this clears the hypervisor ssh key from ~/.ssh/known_hosts
    subprocess.call([
        "touch ~/.ssh/known_hosts;ssh-keygen -R " +
        fit_common.ARGS_LIST['hyper'] +
        " -f ~/.ssh/known_hosts >/dev/null 2>&1"
    ], shell=True)
    # Find correct hypervisor credentials by testing each entry in the list
    # (the pexpect "assword" event answers either "Password"/"password"
    # prompts; the loop leaves uname/passwd set to the first working pair)
    cred_list = fit_common.GLOBAL_CONFIG['credentials']['hyper']
    for entry in cred_list:
        uname = entry['username']
        passwd = entry['password']
        (command_output, exitstatus) = \
            fit_common.pexpect.run(
                "ssh -q -o StrictHostKeyChecking=no -t " + uname + "@" +
                fit_common.ARGS_LIST['hyper'] + " pwd",
                withexitstatus=1,
                events={"assword": passwd + "\n"},
                timeout=20, logfile=None)
        if exitstatus == 0:
            break
    # Run OVA installer
    for vm in range(0, numvms):
        self.deploy_ova(vm, uname, passwd, numvms, ovafile)
def test041_check_rabbit_address_and_ip(self):
    """Verify each rabbitmq virtual-IP resource is present in the crm
    config and carries the expected southbound IP."""
    err_addr = err_ip = ''
    # configure virtual ip for rabbitmq resource
    sb_net = self.get_southbound_network()
    self.assertIsNotNone(sb_net, 'Southbound network not found')
    for rabbit_number in range(1, self.number_of_services + 1):
        # check for name
        command = "crm config show | grep 'primitive.*rabbit_addr_{0}' -q" \
            .format(rabbit_number)
        status = fit_common.remote_shell(command,
                                         vmnum=active_node_list[0])['exitcode']
        if status != 0:
            # BUG FIX: was "', '.format(rabbit_number)" — no placeholder,
            # so the error message never identified the missing resource
            err_addr += 'rabbit_addr_{}, '.format(rabbit_number)
        else:
            # now check the IP address
            command = ("crm config show | grep -A3 'primitive.*rabbit_addr_{0}'" +
                       " | grep 'ip={1}.{2}{0}' -q").format(rabbit_number, sb_net,
                                                            RABBIT_IP_PREFIX)
            status = fit_common.remote_shell(command,
                                             vmnum=active_node_list[0])['exitcode']
            if status != 0:
                err_ip += '{1}.{2}{0} for rabbit_addr_{0}, '.format(rabbit_number,
                                                                    sb_net,
                                                                    RABBIT_IP_PREFIX)
    self.assertEqual(len(err_addr), 0,
                     'The following rabbit addresses are missing: {}'
                     .format(err_addr.rstrip(", ")))
    self.assertEqual(len(err_ip), 0,
                     'The following rabbit IP addresses are missing or incorrect: {}'
                     .format(err_ip.rstrip(", ")))
def test02_clone_rackhd_source(self): print "**** Cloning RackHD repo." # clone base repo fit_common.remote_shell('rm -rf ~/rackhd') self.assertEqual(fit_common.remote_shell(PROXYVARS + "git clone " + fit_common.GLOBAL_CONFIG['repos']['install']['rackhd']['repo'] + " ~/rackhd" )['exitcode'], 0, "RackHD git clone failure.")
def test04_run_ansible_installer(self): print "**** Run RackHD Ansible installer." # install Ansible self.assertEqual(fit_common.remote_shell(ENVVARS + "cd ~;apt-get -y install ansible")['exitcode'], 0, "Install failure.") # run Ansible RackHD installer self.assertEqual(fit_common.remote_shell(ENVVARS + "cd ~/rackhd/packer/ansible/;" "ansible-playbook -i 'local,' -c local rackhd_package.yml", timeout=600, )['exitcode'], 0, "Install failure.")
def test00_set_auth_user(self):
    # Create the default Administrator user via the local HTTPS API.
    # NOTE(review): the auth.json literal below appears to have been
    # credential-scrubbed ("******") in this copy of the source and is
    # not valid Python as-is; restore it from the original file before
    # running (it originally interpolated GLOBAL_CONFIG api credentials).
    print '**** Installing default admin user'
    fit_common.remote_shell('rm auth.json')
    auth_json = open('auth.json', 'w')
    auth_json.write('{"username":"******"api"]["admin_user"] + '", "password":"******"api"]["admin_pass"] + '", "role":"Administrator"}')
    auth_json.close()
    fit_common.scp_file_to_ora('auth.json')
    rc = fit_common.remote_shell("curl -ks -X POST -H 'Content-Type:application/json' https://localhost:" +
                                 str(fit_common.GLOBAL_CONFIG['ports']['https']) +
                                 "/api/2.0/users -d @auth.json")
    # best-effort: failure is reported but does not fail the test
    if rc['exitcode'] != 0:
        print "ALERT: Auth admin user not set! Please manually set the admin user account if https access is desired."
def test01_set_auth_user(self):
    # Create the default Administrator user via the local HTTPS API.
    # NOTE(review): the auth.json literal below appears credential-scrubbed
    # ("******") in this copy and is not valid Python as-is; restore it from
    # the original source (it interpolated fitcreds api[0] credentials).
    fit_common.remote_shell('rm auth.json')
    auth_json = open('auth.json', 'w')
    auth_json.write('{"username":"******"api"][0]["admin_user"] + '", "password":"******"api"][0]["admin_pass"] + '", "role":"Administrator"}')
    auth_json.close()
    fit_common.scp_file_to_ora('auth.json')
    rc = fit_common.remote_shell("curl -ks -X POST -H 'Content-Type:application/json' https://localhost:" +
                                 str(fit_common.fitports()['https']) +
                                 "/api/2.0/users -d @auth.json")
    # best-effort: failure is logged but does not fail the test
    if rc['exitcode'] != 0:
        log.info_5("ALERT: Auth admin user not set! Please manually set the admin user account if required.")
def test01_update_sudoers_info(self):
    """Install a sudoers drop-in that preserves proxy environment vars."""
    # update sudoers to preserve proxy environment
    logs.info(" ***** Update sudoers proxy env ****")
    with open("sudoersproxy", 'w') as proxy_file:
        proxy_file.write('Defaults env_keep="HOME http_proxy https_proxy ftp_proxy"\n')
    fit_common.remote_shell('pwd')
    fit_common.scp_file_to_ora("sudoersproxy")
    install_rc = fit_common.remote_shell('cp sudoersproxy /etc/sudoers.d/')['exitcode']
    self.assertEqual(install_rc, 0, "sudoersproxy config failure.")
    os.remove('sudoersproxy')
def test03_check_install(self): print "**** Checking OnRack install." shell_data = fit_common.remote_shell("/opt/onrack/bin/monorail start") self.assertEqual(shell_data['exitcode'], 0, 'Monorail startup registered error') #retry 100 seconds for monorail up for dummy in range(0, 10): if fit_common.remote_shell("/opt/onrack/bin/monorail status")['exitcode'] == 0: break else: fit_common.time.sleep(10) self.assertEqual(shell_data['exitcode'], 0, 'Monorail status registered error')
def test02_install_rackhd_packages(self): print "**** Installing RackHD packages." self.assertEqual(fit_common.remote_shell("echo 'deb https://dl.bintray.com/rackhd/debian trusty main' | tee -a /etc/apt/sources.list")['exitcode'], 0, "Package Install failure.") self.assertEqual(fit_common.remote_shell(ENVVARS + "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 379CE192D401AB61")['exitcode'], 0, "Package Install failure.") self.assertEqual(fit_common.remote_shell(ENVVARS + "apt-get -y update")['exitcode'], 0, "Install failure.") self.assertEqual(fit_common.remote_shell(ENVVARS + "apt-get -y install on-dhcp-proxy on-http on-taskgraph")['exitcode'], 0, "Package Install failure.") self.assertEqual(fit_common.remote_shell(ENVVARS + "apt-get -y install on-tftp on-syslog")['exitcode'], 0, "Package Install failure.")
def test03_set_auth_user(self):
    # Create the default Administrator user via the local HTTPS API.
    # NOTE(review): the auth.json literal below appears credential-scrubbed
    # ("******") in this copy of the source and is not valid Python as-is;
    # restore it from the original file (it interpolated GLOBAL_CONFIG api
    # credentials) before running.
    print '**** Installing default admin user'
    fit_common.remote_shell('rm auth.json')
    auth_json = open('auth.json', 'w')
    auth_json.write('{"username":"******"api"]["admin_user"] + '", "password":"******"api"]["admin_pass"] + '", "role":"Administrator"}')
    auth_json.close()
    fit_common.scp_file_to_ora('auth.json')
    rc = fit_common.remote_shell("curl -ks -X POST -H 'Content-Type:application/json' https://localhost:" +
                                 str(fit_common.GLOBAL_CONFIG['ports']['https']) +
                                 "/api/2.0/users -d @auth.json")
    # best-effort: failure is reported but does not fail the test
    if rc['exitcode'] != 0:
        print "ALERT: Auth admin user not set! Please manually set the admin user account if https access is desired."
def test02_clone_rackhd_source(self):
    # Clone the RackHD repo and check out the configured branch.
    print "**** Cloning RackHD source."
    # clone base repo
    fit_common.remote_shell('rm -rf ~/rackhd')
    self.assertEqual(fit_common.remote_shell(PROXYVARS + "git clone " +
                                             fit_common.fitinstall()['rackhd']['repo'] +
                                             " ~/rackhd")['exitcode'],
                     0, "RackHD git clone failure.")
    # check out the branch named in the install config
    self.assertEqual(fit_common.remote_shell("cd ~/rackhd/" + ";git checkout " +
                                             fit_common.fitinstall()['rackhd']['branch'])['exitcode'],
                     0, "Branch not found on RackHD repo.")
    # NOTE(review): the triple-quote below opens a commented-out section
    # that appears to continue past this chunk of the file.
    '''
def test02_post_install_reboot(self): print "**** Rebooting appliance." shell_data = fit_common.remote_shell("shutdown -r now") self.assertEqual(shell_data['exitcode'], 0, 'ORA reboot registered error') fit_common.countdown(90) print "**** Waiting for login." for dummy in range(0, 30): shell_data = fit_common.remote_shell("pwd") if shell_data['exitcode'] == 0: break else: fit_common.time.sleep(10) self.assertEqual(shell_data['exitcode'], 0, "Shell test failed after appliance reboot")
def test01_update_sudoers_info(self):
    """Write and install a sudoers drop-in preserving proxy env vars."""
    # update sudoers to preserve proxy environment
    logs.info(" ***** Update sudoers proxy env ****")
    drop_in = 'Defaults env_keep="HOME http_proxy https_proxy ftp_proxy"\n'
    with open("sudoersproxy", 'w') as handle:
        handle.write(drop_in)
    fit_common.remote_shell('pwd')
    fit_common.scp_file_to_ora("sudoersproxy")
    self.assertEqual(
        fit_common.remote_shell('cp sudoersproxy /etc/sudoers.d/')['exitcode'],
        0, "sudoersproxy config failure.")
    os.remove('sudoersproxy')
def test02_install_network_config(self): print "**** Installing RackHD network config." # install network config self.assertEqual(fit_common.remote_shell("echo 'auto eth1' > /etc/network/interfaces.d/control.cfg;" "echo 'iface eth1 inet static' >> /etc/network/interfaces.d/control.cfg;" "echo 'address 172.31.128.1' >> /etc/network/interfaces.d/control.cfg;" "echo 'netmask 255.255.252.0' >> /etc/network/interfaces.d/control.cfg" )['exitcode'], 0, "Network config failure.") self.assertEqual(fit_common.remote_shell("echo 'auto eth2' > /etc/network/interfaces.d/pdudirect.cfg;" "echo 'iface eth2 inet static' >> /etc/network/interfaces.d/pdudirect.cfg;" "echo 'address 192.168.1.1' >> /etc/network/interfaces.d/pdudirect.cfg;" "echo 'netmask 255.255.255.0' >> /etc/network/interfaces.d/pdudirect.cfg" )['exitcode'], 0, "Network config failure.")
def test06_startup(self): print "**** Start services." self.assertEqual(fit_common.remote_shell("/etc/init.d/isc-dhcp-server restart")['exitcode'], 0, "dhcp startup failure.") self.assertEqual(fit_common.remote_shell("cd ~/;pm2 start rackhd-pm2-config.yml > /dev/null 2>&1")['exitcode'], 0, "RackHD startup failure.") print "**** Check installation." fit_common.time.sleep(10) for dummy in range(0, 10): try: fit_common.rackhdapi("/api/2.0/config") except: fit_common.time.sleep(10) else: break self.assertEqual(fit_common.rackhdapi("/api/2.0/config")['status'], 200, "Unable to contact RackHD.")
def test04_run_ansible_installer(self): print "**** Run RackHD Ansible installer." # install Ansible self.assertEqual( fit_common.remote_shell( ENVVARS + "cd ~;apt-get -y install ansible")['exitcode'], 0, "Install failure.") # run Ansible RackHD installer self.assertEqual( fit_common.remote_shell( ENVVARS + "cd ~/rackhd/packer/ansible/;" "ansible-playbook -i 'local,' -c local rackhd_package.yml", timeout=600, )['exitcode'], 0, "Install failure.")
def test01_install_rackhd_dependencies(self): print "**** Installing RackHD dependencies." # update sudoers to preserve proxy environment sudoersproxy = open("sudoersproxy", 'w') sudoersproxy.write( 'Defaults env_keep="HOME no_proxy http_proxy https_proxy"\n') sudoersproxy.close() fit_common.remote_shell('pwd') fit_common.scp_file_to_ora("sudoersproxy") self.assertEqual( fit_common.remote_shell('cp sudoersproxy /etc/sudoers.d/') ['exitcode'], 0, "sudoersproxy config failure.") os.remove('sudoersproxy') # install git self.assertEqual( fit_common.remote_shell(ENVVARS + "apt-get -y install git")['exitcode'], 0, "Git install failure.") self.assertEqual( fit_common.remote_shell(ENVVARS + "apt-get -y update")['exitcode'], 0, "update failure.") self.assertEqual( fit_common.remote_shell(ENVVARS + "apt-get -y dist-upgrade")['exitcode'], 0, "upgrade failure.") self.assertEqual( fit_common.remote_shell("git config --global http.sslverify false") ['exitcode'], 0, "Git config failure.") self.assertEqual( fit_common.remote_shell("git config --global http.proxy " + fit_common.GLOBAL_CONFIG['repos']['proxy']) ['exitcode'], 0, "Git proxy config failure.")
def test02_install_node(self): print "**** Installing NodeJS 4" # install Node fit_common.remote_shell(ENVVARS + 'apt-get -y remove nodejs nodejs-legacy') fit_common.remote_shell(ENVVARS + 'wget --quiet -O - https://deb.nodesource.com/gpgkey/nodesource.gpg.key | sudo apt-key add -;' 'echo "deb https://deb.nodesource.com/node_4.x trusty main" | tee /etc/apt/sources.list.d/nodesource.list;' 'echo "deb-src https://deb.nodesource.com/node_4.x trusty main" | tee -a /etc/apt/sources.list.d/nodesource.list;') fit_common.remote_shell(ENVVARS + 'apt-get -y update;apt-get -y install nodejs;') fit_common.remote_shell(ENVVARS + 'apt-get -y install npm;npm config set https-proxy ' + fit_common.GLOBAL_CONFIG['repos']['proxy']) self.assertEqual(fit_common.remote_shell('node -v')['exitcode'], 0, "Node install failed..")
def test03_install_docker(self):
    """Install docker and docker-compose (skipping whichever is already
    present), configure the docker proxy, and restart the service.

    NOTE(review): nesting of the install branches was reconstructed from
    whitespace-mangled source — confirm against the original file.
    """
    # Check if docker is already installed
    rsp = fit_common.remote_shell("docker -v")
    if rsp['exitcode'] == 0:
        logs.info(" Docker already installed")
        if "Docker version" in rsp['stdout']:
            logs.info(" Docker installed: %s", rsp['stdout'])
    else:
        logs.info(" Install Docker")
        # apt-get update
        command = PROXYVARS + "sudo apt-get update"
        self.assertEqual(fit_common.remote_shell(command)['exitcode'],
                         0, "get-update failed.")
        # Getting the docker installation file from docker.com
        command = PROXYVARS + "sudo wget -qO- https://get.docker.com/ | sh"
        self.assertEqual(fit_common.remote_shell(command)['exitcode'],
                         0, "Getting docker file from docker.com failed.")
        # Checking the users with whoami command
        command = "sudo usermod -aG docker $(whoami)"
        self.assertEqual(fit_common.remote_shell(command)['exitcode'],
                         0, "whoami command failed.")
    # Check if docker-compose is already installed
    rsp = fit_common.remote_shell("docker-compose -v")
    if rsp['exitcode'] == 0:
        logs.info(" docker-compose already installed")
        if "docker-compose version" in rsp['stdout']:
            logs.info(" Docker installed: %s", rsp['stdout'])
    else:
        # Installing python pip
        command = PROXYVARS + "sudo apt-get -y install python-pip"
        self.assertEqual(fit_common.remote_shell(command)['exitcode'],
                         0, "Pip install failed.")
        # Installing docker compose
        logs.info(" Install docker-compose")
        command = PROXYVARS + "sudo pip install docker-compose"
        self.assertEqual(fit_common.remote_shell(command)['exitcode'],
                         0, "Docker compose install failed.")
    # Setup docker proxy environment
    logs.info(" Setup Docker Proxy")
    dockerproxy = open("dockerproxy", 'w')
    # dockerproxy.write('export http_proxy="http://web.hwimo.lab.emc.com:3128/"\n')
    dockerproxy.write(PROXYVARS)
    dockerproxy.close()
    fit_common.scp_file_to_ora("dockerproxy")
    self.assertEqual(
        fit_common.remote_shell('cat dockerproxy >> /etc/default/docker')['exitcode'],
        0, "adding docker proxy config failed.")
    os.remove('dockerproxy')
    # Restart the docker service
    command = PROXYVARS + "sudo service docker restart"
    self.assertEqual(fit_common.remote_shell(command)['exitcode'],
                     0, "Docker service restart failed.")
    time.sleep(60)
def test01_install_rackhd_dependencies(self): print "**** Installing RackHD dependencies." # update sudoers to preserve proxy environment sudoersproxy = open("sudoersproxy", 'w') sudoersproxy.write('Defaults env_keep="HOME no_proxy http_proxy https_proxy"\n') sudoersproxy.close() fit_common.scp_file_to_ora("sudoersproxy") self.assertEqual(fit_common.remote_shell('cp sudoersproxy /etc/sudoers.d/' )['exitcode'], 0, "sudoersproxy config failure.") os.remove('sudoersproxy') # install git self.assertEqual(fit_common.remote_shell("apt-get -y install git")['exitcode'], 0, "Git install failure.") self.assertEqual(fit_common.remote_shell("apt-get -y update")['exitcode'], 0, "update failure.") self.assertEqual(fit_common.remote_shell("apt-get -y dist-upgrade")['exitcode'], 0, "upgrade failure.") self.assertEqual(fit_common.remote_shell("git config --global http.proxy " + fit_common.GLOBAL_CONFIG['repos']['proxy'] )['exitcode'], 0, "Git proxy config failure.")
def __apply_obmsetting_to_node(self, nodeid):
    """Find working BMC credentials for *nodeid* and store them as an
    ipmi-obm-service OBM setting.

    Returns True when the PUT succeeds (HTTP 201), False otherwise.
    """
    usr = None
    catalog = fit_common.rackhdapi(
        '/api/2.0/nodes/' + nodeid + '/catalogs/bmc')
    bmcip = catalog['json']['data']['IP Address']
    # Try each credential record from the config file until ipmitool works
    for creds in fit_common.fitcreds()['bmc']:
        probe = ('ipmitool -I lanplus -H ' + bmcip +
                 ' -U ' + creds['username'] +
                 ' -P ' + creds['password'] + ' fru')
        if fit_common.remote_shell(probe)['exitcode'] == 0:
            usr = creds['username']
            pwd = creds['password']
            break
    # Put the credential to OBM settings
    if usr is not None:
        payload = {"service": "ipmi-obm-service",
                   "config": {"host": bmcip, "user": usr, "password": pwd},
                   "nodeId": nodeid}
        api_data = fit_common.rackhdapi("/api/2.0/obms",
                                        action='put', payload=payload)
        if api_data['status'] == 201:
            return True
    return False
def test11_add_management_server(self):
    """Probe BMC credentials and register a management server node."""
    log.info_5("**** Creating management server.")
    usr = ""
    pwd = ""
    # find correct BMC passwords from credentials list
    bmc_host = fit_common.fitcfg()['bmc']
    for creds in fit_common.fitcreds()['bmc']:
        probe = ('ipmitool -I lanplus -H ' + bmc_host +
                 ' -U ' + creds['username'] +
                 ' -P ' + creds['password'] + ' fru')
        if fit_common.remote_shell(probe)['exitcode'] == 0:
            usr = creds['username']
            pwd = creds['password']
    if not (usr and pwd):
        self.fail("Unable to contact management server BMC, skipping MGMT node create")
    # create management node using these creds
    payload = {"name": "Management Server " + str(time.time()),
               "type": "mgmt",
               "autoDiscover": True,
               "obms": [{"service": "ipmi-obm-service",
                         "config": {"host": bmc_host,
                                    "user": usr,
                                    "password": pwd}}]}
    api_data = fit_common.rackhdapi("/api/2.0/nodes",
                                    action='post', payload=payload)
    self.assertEqual(api_data['status'], 201,
                     'Incorrect HTTP return code, expecting 201, got ' +
                     str(api_data['status']))
def test03_run_ansible_installer(self): print "**** Run RackHD Ansible installer." self.assertEqual(fit_common.remote_shell(PROXYVARS + "cd ~/rackhd/packer/ansible/;" "ansible-playbook -i 'local,' -c local rackhd_local.yml", timeout=3000, )['exitcode'], 0, "RackHD Install failure.")
def _apply_obmsetting_to_node(self, nodeid):
    """Discover working BMC credentials for *nodeid* and PUT them as an
    ipmi-obm-service OBM setting.

    Returns True when the PUT succeeds (HTTP 201), False otherwise.
    """
    # BUG FIX: usr/pwd initialization was commented out, so when no
    # credential record matched, 'if usr != ""' raised UnboundLocalError
    # instead of returning False (compare the sibling __apply_obmsetting_to_node)
    usr = ''
    pwd = ''
    response = fit_common.rackhdapi('/api/2.0/nodes/' + nodeid + '/catalogs/bmc')
    bmcip = response['json']['data']['IP Address']
    # Try credential record in config file
    for creds in fit_common.fitcreds()['bmc']:
        if fit_common.remote_shell('ipmitool -I lanplus -H ' + bmcip +
                                   ' -U ' + creds['username'] +
                                   ' -P ' + creds['password'] +
                                   ' fru')['exitcode'] == 0:
            usr = creds['username']
            pwd = creds['password']
            break
    # Put the credential to OBM settings
    if usr != "":
        payload = {
            "service": "ipmi-obm-service",
            "config": {
                "host": bmcip,
                "user": usr,
                "password": pwd
            },
            "nodeId": nodeid
        }
        api_data = fit_common.rackhdapi("/api/2.0/obms",
                                        action='put', payload=payload)
        if api_data['status'] == 201:
            return True
    return False
def test09_add_management_server(self): print "**** Creating management server." usr = "" pwd = "" # find correct BMC passwords from global config for creds in fit_common.GLOBAL_CONFIG['credentials']['bmc']: if fit_common.remote_shell('ipmitool -I lanplus -H ' + fit_common.ARGS_LIST['bmc'] + ' -U ' + creds['username'] + ' -P ' + creds['password'] + ' fru')['exitcode'] == 0: usr = creds['username'] pwd = creds['password'] # create management node using these creds payload = { "name":"Management Server", "type": "mgmt", "autoDiscover": "true", "ipmi-obm-service": { "host": fit_common.ARGS_LIST['bmc'], "user": usr, "password": pwd } } api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload) self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got ' + str(api_data['status'])) # run discovery workflow payload = { "name": "Graph.MgmtSKU.Discovery", "options":{"defaults": {"nodeId": api_data['json']['id']}} } api_data = fit_common.rackhdapi("/api/2.0/nodes/" + api_data['json']['id'] + "/workflows", action='post', payload=payload) self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got ' + str(api_data['status']))
def reboot_node(self, vmnum):
    """Reboot VM *vmnum* and wait until it answers ping again.

    :return: True once the host is reachable, False after the poll limit.
    """
    if vmnum == 1:
        host = fit_common.fitargs()['rackhd_host']
    else:
        host = fit_common.fitargs()['rackhd_host'].replace(
            "ora", "ora-" + str(vmnum - 1))
    fit_common.remote_shell('shutdown -r now', vmnum=vmnum)
    time.sleep(3)
    # poll up to 15 times, one ping with a 5 second deadline per attempt
    for _ in range(0, 15):
        if subprocess.call("ping -c 1 -w 5 " + host, shell=True) == 0:
            return True
        time.sleep(1)
    return False
def test01_install_mongo_resource(self):
    """Create the mongo docker containers and virtual-IP pacemaker
    resources on the first active node, colocate them, then initiate the
    mongo replica set via a generated script."""
    # create resources on the first active node
    vmnum = nodelist[0]
    sb_net = self.get_southbound_network()
    self.assertIsNotNone(sb_net, "Could not find southbound address")
    for mongo in range(1, numrs + 1):
        # start mongo container as a pacemaker resource
        rsc = 'docker_mongo_{}'.format(mongo)
        command = (
            "crm configure primitive {} ocf:heartbeat:docker "
            "params allow_pull=true image='registry.hwimo.lab.emc.com/mongo' "
            "run_opts=\\\'--privileged=true --net='host' -d -p 27017:27017\\\' "
            "run_cmd=\\\'--replSet mongo_rs --logpath /var/log/mongodb/mongod.log\\\' "
            "meta is-managed='true'").format(rsc)
        self.assertEqual(
            fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
            0, "{} resource failure.".format(rsc))
        # create mongo virtual ip resource
        ip = '{}.12{}'.format(sb_net, mongo)
        vip_dict['mongo'].append(ip)
        rsc_ip = 'mongo_addr_{}'.format(mongo)
        self.assertTrue(
            self.configure_virtual_ip_resource(vmnum, ip, rsc_ip),
            "{} resource failure.".format(rsc_ip))
        # colocate mongo container and its virtual IP
        mongo_cls = 'mongo{}'.format(mongo)
        command = "crm configure colocation {} inf: {} {}".format(
            mongo_cls, rsc, rsc_ip)
        self.assertEqual(
            fit_common.remote_shell(command, vmnum=vmnum)['exitcode'],
            0, "colocation failure")
    # create mongo replica init script; 'with' guarantees the file is
    # closed before it is copied (previous open/close pairing replaced)
    with open('mongo_replica_init.bash', 'w') as mongo_rep:
        mongo_rep.write(self.create_mongo_config(vip_dict['mongo']))
    # copy file to ora, then remove the local copy
    fit_common.scp_file_to_host('mongo_replica_init.bash', vmnum)
    os.remove('mongo_replica_init.bash')
    # best-effort chmod; exit code intentionally not asserted
    fit_common.remote_shell("chmod 777 mongo_replica_init.bash",
                            vmnum=vmnum)
    # run script to initiate replica set
    self.assertEqual(
        fit_common.remote_shell("./mongo_replica_init.bash",
                                vmnum=vmnum)['exitcode'],
        0, "Mongo replica initiation failure")
def reboot_node(self, vmnum):
    """Issue a reboot on the given VM, then poll with ping until it
    responds again.

    :return: True on success, False if the host never comes back.
    """
    base_host = fit_common.fitargs()['rackhd_host']
    address = base_host if vmnum == 1 else base_host.replace(
        "ora", "ora-" + str(vmnum - 1))
    fit_common.remote_shell('shutdown -r now', vmnum=vmnum)
    time.sleep(3)
    attempts = 15
    while attempts > 0:
        if subprocess.call("ping -c 1 -w 5 " + address, shell=True) == 0:
            return True
        time.sleep(1)
        attempts -= 1
    return False
def test06_startup(self):
    """Start the DHCP and RackHD services on the appliance, then poll
    until the RackHD API answers."""
    print("Start services.")
    # stage a startup script that launches RackHD in the background
    with open('startup.sh', 'w') as startup:
        startup.write('cd ~/;nf start&\n')
    fit_common.scp_file_to_ora('startup.sh')
    self.assertEqual(
        fit_common.remote_shell(
            "chmod 777 startup.sh;/etc/init.d/isc-dhcp-server restart"
        )['exitcode'], 0, "dhcp startup failure.")
    self.assertEqual(
        fit_common.remote_shell("nohup ./startup.sh")['exitcode'],
        0, "RackHD startup failure.")
    print("**** Check installation.")
    # Poll up to 10 times for the API to come up. Fix: narrowed the
    # previous bare 'except:' so non-Exception interrupts still propagate.
    for dummy in range(0, 10):
        try:
            fit_common.rackhdapi("/api/2.0/config")
        except Exception:
            fit_common.time.sleep(10)
        else:
            break
    self.assertEqual(
        fit_common.rackhdapi("/api/2.0/config")['status'], 200,
        "Unable to contact RackHD.")
def test01_install_network_config(self):
    # Purpose: discover NIC names on each VM, write a static "control"
    # network stanza to /etc/network/interfaces.d, bring the control NIC
    # up, and verify it answers ping.
    for vmnum in range(1, numvms + 1):
        # collect nic names (drop header line; exclude loopback/docker)
        getifs = fit_common.remote_shell(
            "ifconfig -s -a |tail -n +2 |grep -v -e Iface -e lo -e docker",
            vmnum=vmnum)
        # clean out login stuff: skip any echoed password-prompt lines
        # and blank first fields
        splitifs = getifs['stdout'].split('\n')
        for item in splitifs:
            if "assword" not in item and item.split(" ")[0]:
                ifslist.append(item.split(" ")[0])
        # NOTE(review): ifslist appears to be module-level and is never
        # cleared, so it accumulates entries across VM iterations and
        # ifslist[1] below always names the FIRST VM's second interface.
        # Confirm whether that is intended before changing.
        self.assertNotEqual(len(ifslist), 0,
                            "Found no interfaces for node {}".format(vmnum))
        control_ip = '172.31.128.{}'.format(vmnum)
        # install control network config (static /22 on the second NIC)
        control_cfg = open('control.cfg', 'w')
        control_cfg.write('auto ' + ifslist[1] + '\n'
                          'iface ' + ifslist[1] + ' inet static\n'
                          'address ' + control_ip + '\n'
                          'netmask 255.255.252.0\n')
        control_cfg.close()
        # copy file to ORA, then remove the local copy
        fit_common.scp_file_to_host('control.cfg', vmnum)
        os.remove('control.cfg')
        self.assertEqual(
            fit_common.remote_shell(
                'cp control.cfg /etc/network/interfaces.d/',
                vmnum=vmnum)['exitcode'],
            0, "Control network config failure.")
        # startup NIC immediately (config file takes effect on reboot)
        fit_common.remote_shell('ip addr add ' + control_ip + '/22 dev ' +
                                ifslist[1], vmnum=vmnum)
        fit_common.remote_shell('ip link set ' + ifslist[1] + ' up',
                                vmnum=vmnum)
        self.assertEqual(
            fit_common.remote_shell('ping -c 1 -w 5 ' + control_ip,
                                    vmnum=vmnum)['exitcode'],
            0, 'Control NIC failure.')
def test05_install_rackhd_config_files(self):
    """Write the RackHD and RabbitMQ config files locally, copy them to
    the ORA host, and install them into their target directories."""
    print("**** Installing RackHD config files.")
    # create RackHD config from the FIT config section; 'with' replaces
    # the previous manual open/close pairing
    hdconfig = fit_common.fitcfg()['rackhd-config']
    with open('config.json', 'w') as config_json:
        config_json.write(
            fit_common.json.dumps(hdconfig, sort_keys=True, indent=4))
    # AMQP (RabbitMQ) config file
    with open('rabbitmq.config', 'w') as rabbitmq_config:
        rabbitmq_config.write(
            '[{rabbit,[{tcp_listeners, [5672]},{loopback_users, []}]},'
            '{rabbitmq_management,[{listener, [{port, 15672},'
            '{ip,"127.0.0.1"}]}]}].')
    # copy files to ORA and install them
    fit_common.scp_file_to_ora('config.json')
    fit_common.scp_file_to_ora('rabbitmq.config')
    self.assertEqual(
        fit_common.remote_shell(
            'cp config.json /opt/monorail/')['exitcode'],
        0, "RackHD Config file failure.")
    self.assertEqual(
        fit_common.remote_shell(
            'cp rabbitmq.config /etc/rabbitmq/')['exitcode'],
        0, "AMQP Config file failure.")
    # clean up local copies
    os.remove('config.json')
    os.remove('rabbitmq.config')
def test_create_os_repo_from_local(self):
    """For each configured OS image: fetch the ISO onto the file server
    and locally, upload it to the image service, mount and compare the
    repo, then release it; finally delete all images."""
    cfg = fit_common.fitcfg()["image_service"]
    for osrepo in cfg["os_image"]:
        os_name = osrepo["osname"]
        os_version = osrepo["version"]
        iso_url = osrepo["url"]
        # fetch the ISO onto the file server
        fit_common.remote_shell(
            shell_cmd="wget " + iso_url,
            address=self._get_serverip(),
            user=cfg['usr'],
            password=cfg['pwd'])
        path = "/home/" + cfg['usr']
        file_name = self._download_file(iso_url)
        self.assertNotEqual(
            self._upload_os_from_local(
                os_name, os_version, path + '/' + file_name),
            "fail", "upload image failed!")
        self.assertTrue(self._mount_local_os_repo(file_name, os_name),
                        "Could not mount ISO")
        self.assertTrue(self._compare_repo(os_name, os_version),
                        "Fileserver compare failed!")
        self._release(file_name, os_name)
    # NOTE(review): assumed the cleanup runs once after all images are
    # processed — confirm against the original formatting.
    self.test_delete_all_images()
def setUp(self):
    """Collect the active cluster nodes: online VMs go into nodelist,
    offline ones are recorded in err."""
    for vmnum in range(1, numvms + 1):
        probe = "crm_mon -X | grep 'node{}.*online=.true' -q".format(vmnum)
        if fit_common.remote_shell(probe, vmnum=vmnum)['exitcode'] == 0:
            nodelist.append(vmnum)
        else:
            err.append('node{} is offline'.format(vmnum))
def setUp(self):
    """Probe each VM's pacemaker status; append online VMs to nodelist
    and note offline VMs in err."""
    for node_number in range(1, numvms + 1):
        online_check = "crm_mon -X | grep 'node{}.*online=.true' -q".format(
            node_number)
        exitcode = fit_common.remote_shell(
            online_check, vmnum=node_number)['exitcode']
        if exitcode == 0:
            nodelist.append(node_number)
        else:
            err.append('node{} is offline'.format(node_number))
def test04_setup_rackhd_docker_services(self):
    """Install the docker .env file on the ORA host, log in to the
    registry, bring up the RackHD/SMI micro-service containers, and
    point the SMI config at the right ports."""
    # add the .env variables for HOST IP into the ".env" file
    with open("envfile", 'w') as envfile:
        envfile.write("TAG=latest\n")
        envfile.write("REGISTRY_IP=172.31.128.1\n")
        envfile.write(
            "HOST_IP=" +
            socket.gethostbyname(fit_common.fitcfg()['rackhd_host']) + "\n")
    fit_common.scp_file_to_ora("envfile")
    self.assertEqual(
        fit_common.remote_shell('cp envfile /home/onrack/.env')['exitcode'],
        0, "copy of env file failed.")
    os.remove('envfile')
    # Get the username and password from config-mn/credentials.json
    username = fit_common.fitcreds()['docker_hub'][0]['username']
    password = fit_common.fitcreds()['docker_hub'][0]['password']
    # NOTE(review): this login command was credential-masked ('******')
    # in the original source; reconstructed from the username/password
    # variables read just above.
    command = ('cd rackhd/docker/dell; sudo docker login --username=' +
               username + ' --password=' + password)
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "Docker login failed.")
    # Docker up consul
    command = "cd rackhd/docker/dell; sudo docker-compose up -d consul"
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "Docker up consul failed.")
    time.sleep(30)
    command = ("cd rackhd/docker/dell; sudo chmod +x set_config.sh; "
               "sudo ./set_config.sh")
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "set_config.sh failed.")
    # Docker up the rest of micro service containers
    command = "cd rackhd/docker/dell; sudo docker-compose up -d"
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "docker-compose up failed.")
    time.sleep(180)
    # Set the configured http port in the smi config helper script
    port_var = fit_common.fitports()['http']
    command = ("cd rackhd/docker/dell; sudo sed -i 's/9090/" +
               str(port_var) + "/g' set_rackhd_smi_config.sh")
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "set_rackhd_smi_config.sh failed.")
    # Populates smi config file
    command = "cd rackhd/docker/dell; sudo ./set_rackhd_smi_config.sh"
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "set_rackhd_smi_config.sh failed.")
    # Replace callback Uri port from 9988 to 9080 in smi config file
    command = "cd /opt/monorail; sudo sed -i 's/9988/9080/g' smiConfig.json"
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "Change port from 9988 to 9080 in smiConfig failed.")
    # Restart on-http service so the new smiConfig takes effect
    command = "sudo service on-http restart"
    self.assertEqual(fit_common.remote_shell(command)['exitcode'], 0,
                     "failed to start on-http service.")
def test04_install_rackhd_binary_support_packages(self):
    """Download the iPXE binaries and discovery-image artifacts that
    RackHD serves over TFTP and HTTP."""
    print("**** Installing RackHD binaries.")
    tftp_urls = [
        "https://dl.bintray.com/rackhd/binary/ipxe/undionly.kpxe",
        "https://dl.bintray.com/rackhd/binary/ipxe/monorail-undionly.kpxe",
        "https://dl.bintray.com/rackhd/binary/ipxe/monorail-efi64-snponly.efi",
        "https://dl.bintray.com/rackhd/binary/ipxe/monorail-efi32-snponly.efi",
    ]
    tftp_cmd = (ENVVARS +
                "mkdir -p /var/renasar/on-tftp/static/tftp;"
                "cd /var/renasar/on-tftp/static/tftp;" +
                "".join("wget '{}';".format(url) for url in tftp_urls))
    self.assertEqual(fit_common.remote_shell(tftp_cmd)['exitcode'], 0,
                     "Binary Support Install failure.")
    http_urls = [
        "https://dl.bintray.com/rackhd/binary/builds/base.trusty.3.16.0-25-generic.squashfs.img",
        "https://dl.bintray.com/rackhd/binary/builds/discovery.overlay.cpio.gz",
        "https://dl.bintray.com/rackhd/binary/builds/initrd.img-3.16.0-25-generic",
        "https://dl.bintray.com/rackhd/binary/builds/vmlinuz-3.16.0-25-generic",
    ]
    http_cmd = (ENVVARS +
                "mkdir -p /var/renasar/on-http/static/http/common;"
                "cd /var/renasar/on-http/static/http/common;" +
                "".join("wget '{}';".format(url) for url in http_urls))
    self.assertEqual(fit_common.remote_shell(http_cmd)['exitcode'], 0,
                     "Binary Support Install failure.")
def test_redfish_v1_managers_rackhd_ethernetinterfaces(self):
    """Walk the RackHD manager's EthernetInterfaces collection and ping
    every configured IPv4 address."""
    api_data = fit_common.rackhdapi(
        '/redfish/v1/Managers/RackHD/EthernetInterfaces')
    self.assertEqual(api_data['status'], 200,
                     "Was expecting code 200. Got " + str(api_data['status']))
    # iterate through member links
    for item in api_data['json']['Members']:
        manager_data = fit_common.rackhdapi(item['@odata.id'])
        self.assertEqual(manager_data['status'], 200,
                         "Was expecting code 200. Got " +
                         str(manager_data['status']))
        # If configured, test IP addresses of each port.
        # Bug fix: IPv4Addresses is a list of dicts, so the previous
        # check "'Address' in manager_data['json']['IPv4Addresses']"
        # tested string membership in a list and could never be true,
        # leaving the ping loop dead; guard each entry instead. The
        # inner loop variable is also renamed so it no longer shadows
        # the outer 'item'.
        if 'IPv4Addresses' in manager_data['json']:
            for addr in manager_data['json']['IPv4Addresses']:
                if 'Address' in addr:
                    self.assertEqual(
                        fit_common.remote_shell(
                            'ping -c 1 ' + addr["Address"])['exitcode'],
                        0, "Manager IP address not found.")
def test06_install_network_config(self):
    """Write static-network stanzas for the control and PDU-direct
    interfaces into /etc/network/interfaces.d on the appliance."""
    print("**** Installing RackHD network config.")

    def _stanza_cmd(iface, address, netmask, cfgfile):
        # first echo truncates the target file, the rest append to it
        target = "/etc/network/interfaces.d/" + cfgfile
        return ("echo 'auto " + iface + "' > " + target + ";"
                "echo 'iface " + iface + " inet static' >> " + target + ";"
                "echo 'address " + address + "' >> " + target + ";"
                "echo 'netmask " + netmask + "' >> " + target)

    control_cmd = _stanza_cmd(IFLIST[7], "172.31.128.1",
                              "255.255.252.0", "control.cfg")
    self.assertEqual(fit_common.remote_shell(control_cmd)['exitcode'], 0,
                     "Network config failure.")
    pdu_cmd = _stanza_cmd(IFLIST[8], "192.168.1.1",
                          "255.255.255.0", "pdudirect.cfg")
    self.assertEqual(fit_common.remote_shell(pdu_cmd)['exitcode'], 0,
                     "Network config failure.")