def test_drive_capacity_consistency(self):
    ssh = sshclient.SSH("127.0.0.1", "root", "root", port=2222)
    assert ssh.wait_for_host_up() is True
    s = UnixSocket(path)
    s.connect()
    s.recv()

    # 1. expect: yml img size == host disk size
    cmd = "qemu-img info {}".format(drive_test_image)
    r = os.popen(cmd)
    img_info = r.read()
    r.close()
    self.assertIn('virtual size: 1.0G', img_info,
                  "Existing host drive image size is different "
                  "from the size defined in yaml")

    # 2. expect: yml img size == guest disk size
    status, stdout = ssh.exec_command('sg_readcap /dev/sdb')
    self.assertIn('Device size: 1073741760 bytes', stdout,
                  "Guest drive image size is different "
                  "from the size defined in yaml")
    last_lba = re.search("Last logical block address=([0-9]{0,7})",
                         stdout).group(1)
    invalid_lba = int(last_lba) + 1

    # 3. expect: read reports "failed" when the accessed LBA is out of
    # the 1G range
    status, sg_info = ssh.exec_command("sg_map -i | grep sdb")
    assert re.search("ST4000NM0005", sg_info).group()
    sg_index = re.search("sg([0-9]{1,2})", sg_info).group(1)
    cmd = "sg_dd if=/dev/sg{0} bs=520 count=1 skip={1}".format(
        sg_index, last_lba)
    status, stdout = ssh.exec_command(cmd)
    self.assertNotIn('failed', stdout,
                     'Failed, read failed when lba is valid')
    cmd = "sg_dd if=/dev/sg{0} bs=520 count=1 skip={1}".format(
        sg_index, str(invalid_lba))
    status, stdout = ssh.exec_command(cmd)
    self.assertIn('failed', stdout,
                  'Failed, read succeeded when lba is out of range')
    s.close()
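# Why bs=520 in the sg_dd commands above: the expected capacity,
# 1073741760 bytes, is not a multiple of 512 but is exactly
#     2064888 blocks * 520 bytes/block = 1073741760 bytes (~1.0G)
# i.e. the emulated SEAGATE drive reports 520-byte sectors (512 data +
# 8 protection bytes, DIF-style formatting is an inference from the
# numbers, not stated in the source). The last valid LBA is therefore
# 2064887, and invalid_lba = last_lba + 1 is the first out-of-range block.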
def start_node():
    global conf
    global ssh
    global s

    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["compute"]["networks"] = [
        {
            "device": "e1000",
            "id": "e1000.0",
            "mac": "00:60:16:9c:ff:6a",
            "network_mode": "nat",
            "port_forward": [{
                "outside": 2222,
                "inside": 22,
                "protocal": "tcp"
            }]
        },
        {
            "device": "e1000",
            "id": "e1000.1",
            "network_mode": "nat",
            "mac": target_mac
        }
    ]
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
    ]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()

    # first "s": unix socket connected to the QEMU monitor
    # (".monitor" in the node workspace)
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()

    ssh = prepare_ssh()
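# `prepare_ssh()` is called above but not defined in this section. A minimal
# sketch, assuming the same sshclient wrapper and the 2222->22 port forward
# configured in the networks list above (host, port and credentials here are
# assumptions, not from the source):
def prepare_ssh(port=2222):
    ssh = sshclient.SSH(host="127.0.0.1", username="root",
                        password="root", port=port)
    # Block until the guest accepts SSH logins, then hand back the client.
    ssh.wait_for_host_up()
    return ssh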
def start_node():
    global conf
    global ssh
    global s

    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["compute"]["networks"] = [{
        "device": "vmxnet3",
        "id": "mgmt",
        "mac": "00:60:16:9c:50:6a"
    }]
    conf["compute"]["networks"][0]["port_forward"] = [{
        "outside": 2222,
        "inside": 22,
        "protocal": "tcp"
    }]
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
    ]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()

    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()

    # wait until system is ready for ssh.
    ssh = sshclient.SSH(host="127.0.0.1", username="******",
                        password="******", port=2222)
    ssh.wait_for_host_up()
class test_compute_configuration_change(unittest.TestCase):

    def setUp(self):
        fake_config = fixtures.FakeConfig()
        self.conf = fake_config.get_node_info()

    def tearDown(self):
        node = model.CNode(self.conf)
        node.init()
        node.stop()
        node.terminate_workspace()
        self.conf = None
        # if os.path.exists(TMP_CONF_FILE):
        #     os.unlink(TMP_CONF_FILE)
        drive_files = ["/tmp/sda.img", "/tmp/sdb.img"]
        for drive_file in drive_files:
            if os.path.exists(drive_file):
                os.unlink(drive_file)

    def test_set_vcpu(self):
        self.conf["compute"]["cpu"]["quantities"] = 8
        # with open(TMP_CONF_FILE, "w") as yaml_file:
        #     yaml.dump(self.conf, yaml_file, default_flow_style=False)
        node = model.CNode(self.conf)
        node.init()
        node.precheck()
        node.start()
        str_result = run_command(PS_QEMU, True,
                                 subprocess.PIPE, subprocess.PIPE)[1]
        assert "qemu-system-x86_64" in str_result
        assert "-smp 8" in str_result

    def test_set_cpu_family(self):
        self.conf["compute"]["cpu"]["type"] = "IvyBridge"
        # with open(TMP_CONF_FILE, "w") as yaml_file:
        #     yaml.dump(self.conf, yaml_file, default_flow_style=False)
        node = model.CNode(self.conf)
        node.init()
        node.precheck()
        node.start()
        str_result = run_command(PS_QEMU, True,
                                 subprocess.PIPE, subprocess.PIPE)[1]
        assert "qemu-system-x86_64" in str_result
        assert "-cpu IvyBridge" in str_result

    def test_set_memory_capacity(self):
        self.conf["compute"]["memory"]["size"] = 1536
        with open(TMP_CONF_FILE, "w") as yaml_file:
            yaml.dump(self.conf, yaml_file, default_flow_style=False)
        node = model.CNode(self.conf)
        node.init()
        node.precheck()
        node.start()
        str_result = run_command(PS_QEMU, True,
                                 subprocess.PIPE, subprocess.PIPE)[1]
        assert "qemu-system-x86_64" in str_result
        assert "-m 1536" in str_result

    def test_set_disk_drive(self):
        self.conf["compute"]["storage_backend"] = [{
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [
                {"size": 8, "file": "/tmp/sda.img"},
                {"size": 8, "file": "/tmp/sdb.img"}
            ]
        }]
        # with open(TMP_CONF_FILE, "w") as yaml_file:
        #     yaml.dump(self.conf, yaml_file, default_flow_style=False)
        node = model.CNode(self.conf)
        node.init()
        node.precheck()
        node.start()
        qemu_pid = get_qemu_pid(node)
        qemu_cmdline = open(
            "/proc/{}/cmdline".format(qemu_pid)).read().replace("\x00", " ")
        assert "qemu-system-x86_64" in qemu_cmdline
        assert "/tmp/sda.img" in qemu_cmdline
        assert "/tmp/sdb.img" in qemu_cmdline
        assert "format=qcow2" in qemu_cmdline

    def test_qemu_boot_from_disk_img(self):
        MD5_IMG = "986e5e63e8231a307babfbe9c81ca210"
        DOWNLOAD_URL = "https://github.com/InfraSIM/test/raw/master/image/kcs.img"
        test_img_file = "/tmp/kcs.img"
        try:
            helper.fetch_image(DOWNLOAD_URL, MD5_IMG, test_img_file)
        except InfraSimError as e:
            print e.value
            assert False

        self.conf["compute"]["storage_backend"] = [{
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": test_img_file}]
        }]
        # with open(TMP_CONF_FILE, "w") as yaml_file:
        #     yaml.dump(self.conf, yaml_file, default_flow_style=False)
        node = model.CNode(self.conf)
        node.init()
        node.precheck()
        node.start()

        # Port forward from guest 22 to host 2222
        path = os.path.join(node.workspace.get_workspace(), ".monitor")
        s = UnixSocket(path)
        s.connect()
        s.recv()
        payload_enable_qmp = {"execute": "qmp_capabilities"}
        s.send(json.dumps(payload_enable_qmp))
        s.recv()
        payload_port_forward = {
            "execute": "human-monitor-command",
            "arguments": {
                "command-line": "hostfwd_add ::2222-:22"
            }
        }
        s.send(json.dumps(payload_port_forward))
        s.recv()
        s.close()

        import paramiko
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        paramiko.util.log_to_file("filename.log")
        helper.try_func(600, paramiko.SSHClient.connect, ssh, "127.0.0.1",
                        port=2222, username="******", password="******",
                        timeout=120)
        ssh.close()
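# `get_qemu_pid(node)` is used by test_set_disk_drive() above but not defined
# in this section. A minimal sketch, assuming one QEMU process per test run;
# the real helper may instead read a pid file from the node workspace:
import subprocess

def get_qemu_pid(node):
    # pgrep -f matches against the full command line, so this finds the
    # emulator process regardless of how it was launched.
    output = subprocess.check_output(["pgrep", "-f", "qemu-system-x86_64"])
    return int(output.split()[0])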
def test_qemu_boot_from_disk_img(self):
    self.conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 8, "file": fixtures.image}]
    }]
    # with open(TMP_CONF_FILE, "w") as yaml_file:
    #     yaml.dump(self.conf, yaml_file, default_flow_style=False)
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()

    # Port forward from guest 22 to host 2222
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_port_forward = {
        "execute": "human-monitor-command",
        "arguments": {
            "command-line": "hostfwd_add ::2222-:22"
        }
    }
    s.send(json.dumps(payload_port_forward))
    s.recv()
    s.close()

    import paramiko
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    paramiko.util.log_to_file("filename.log")
    helper.try_func(600, paramiko.SSHClient.connect, ssh, "127.0.0.1",
                    port=2222, username="******", password="******",
                    timeout=120)
    ssh.close()
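# `helper.try_func` is used above but not defined in this section. From its
# call sites it looks like a retry wrapper: keep calling func(*args, **kwargs)
# until it stops raising, giving up after the budget in the first argument
# (assumed here to be seconds; it could equally be an attempt count):
import time

def try_func(timeout, func, *args, **kwargs):
    start = time.time()
    while True:
        try:
            return func(*args, **kwargs)
        except Exception:
            # Re-raise once the retry budget is exhausted.
            if time.time() - start > timeout:
                raise
            time.sleep(1)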
def start_node(node_type):
    """
    Create two drives for comparison: the first drive has an additional
    page file, the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh

    # create an empty image for test.
    os.system("touch {0}".format(test_drive_image))
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
        {
            "type": "megasas",
            "max_drive_per_controller": 16,
            "drives": [
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "01234567",
                    "version": "M001",
                    "wwn": "0x5000C500852E2971",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 0,
                    "slot_number": 0,
                    "page-file": page_file
                },
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "12345678",
                    "version": "M001",
                    "wwn": "0x5000C500852E3141",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 1,
                    "slot_number": 1
                }
            ]
        }
    ]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    time.sleep(3)

    # Port forward from guest 22 to host 2222
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_port_forward = {
        "execute": "human-monitor-command",
        "arguments": {
            "command-line": "hostfwd_add ::2222-:22"
        }
    }
    s.send(json.dumps(payload_port_forward))
    s.recv()
    s.close()
    time.sleep(3)

    # wait until system is ready for ssh.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    paramiko.util.log_to_file("filename.log")
    helper.try_func(600, paramiko.SSHClient.connect, ssh, "127.0.0.1",
                    port=2222, username="******", password="******",
                    timeout=120)
    ssh.close()
def start_node(node_type):
    """
    Create two drives for comparison: the first drive has an additional
    page file, the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh
    global s

    # create an empty image for test.
    os.system("touch {0}".format(test_drive_image))
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
        {
            "type": "megasas",
            "max_drive_per_controller": 16,
            "drives": [
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "01234567",
                    "version": "M001",
                    "wwn": "0x5000C500852E2971",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 0,
                    "slot_number": 0,
                    "page-file": page_file
                },
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "12345678",
                    "version": "M001",
                    "wwn": "0x5000C500852E3141",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 1,
                    "slot_number": 1
                }
            ]
        }
    ]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)

    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()

    # wait until system is ready for ssh.
    ssh = sshclient.SSH(host="127.0.0.1", username="******",
                        password="******", port=2222)
    ssh.wait_for_host_up()
def test_step2_nvmedrive_insert(self):
    ssh = sshclient.SSH("127.0.0.1", "root", "root", port=2222)
    assert ssh.wait_for_host_up() is True
    s = UnixSocket(path)
    s.connect()
    s.recv()

    status, stdout = ssh.exec_command('nvme list')
    self.assertNotIn('SSD00000001', stdout, "Failed: SN is duplicate")

    # 2.1: insert known NVME drive
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_drive_insert = {
        "execute": "human-monitor-command",
        "arguments": {
            "command-line": "device_add nvme,"
                            "id=dev-nvme-1,drive=nvme-1,"
                            "model_number=INTEL-SSD00000001,"
                            "serial=FUKD72220009375A01,"
                            "bus=downstream4,cmb_size_mb=1"
        }
    }
    s.send(json.dumps(payload_drive_insert))
    time.sleep(5)
    status, stdout = ssh.exec_command('nvme list')
    self.assertIn('SSD00000001', stdout, "NVME drive insert failed")

    # 2.2: IO test, nvme1n1, SSD00000001
    status, stdout = ssh.exec_command(
        'sudo fio -filename=/dev/nvme1n1 -direct=1 -iodepth 1 -thread '
        '-rw=write -ioengine=psync -bs=4k -size=10M -numjobs=10 '
        '-runtime=100 -do_verify=1 -group_reporting -name=mytest')
    self.assertNotIn('error', stdout, "New NVME drive r/w test failed")
    s.close()
def test_nvmeerror_inject(self):
    ssh = sshclient.SSH(host="127.0.0.1", username="******",
                        password="******", port=2222)
    ssh.wait_for_host_up()
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_error_inject = {
        "execute": "nvme-status-code-error-inject",
        "arguments": {
            "count": 65536,
            "opcode": "rw",
            "id": "dev-nvme-0",
            "nsid": 1,
            "status_field": {
                "dnr": True,
                "more": True
            }
        }
    }
    payload_nvmeclear = {
        'execute': 'nvme-status-code-error-inject',
        'arguments': {
            'count': 0,
            'opcode': 'rw',
            'id': 'dev-nvme-0',
            'nsid': 0,
            'status_field': {
                'sc': 0,
                'sct': 0,
                'dnr': True,
                'more': True
            }
        }
    }
    for cmd_error_inject in error_inject_list:
        payload_error_inject['arguments']['status_field']['sc'] = \
            cmd_error_inject[0]
        payload_error_inject['arguments']['status_field']['sct'] = \
            cmd_error_inject[1]
        s.send(json.dumps(payload_error_inject))
        s.recv()
        status, output = ssh.exec_command(
            "nvme read /dev/nvme0n1 -z 3008 -a 128")
        self.assertNotIn("Success", output,
                         "error of %s inject failed" % cmd_error_inject[2])
        s.send(json.dumps(payload_nvmeclear))
        s.recv()
        status, output = ssh.exec_command(
            "nvme read /dev/nvme0n1 -z 3008 -a 128")
        self.assertIn("Success", output, "clear error failed")
    s.close()
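# `error_inject_list` is not defined in this section. The loop above indexes
# each entry as [0]=sc (status code), [1]=sct (status code type) and
# [2]=description, so it must be a list of triples. A hypothetical example
# entry (values illustrative only, not from the source):
error_inject_list = [
    (0x02, 0x0, "Invalid Field in Command"),
]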
def port_forward(node):
    # Port forward from guest 22 to host 2222
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_port_forward = {
        "execute": "human-monitor-command",
        "arguments": {
            "command-line": "hostfwd_add ::2222-:22"
        }
    }
    s.send(json.dumps(payload_port_forward))
    s.recv()
    s.close()
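# Usage, as in the start_node(node_type) variant above that calls it: invoke
# once after node.start(); when it returns, guest port 22 is reachable on
# host port 2222, so an SSH client can connect through the forward:
#
#     node.start()
#     helper.port_forward(node)
#     ssh = sshclient.SSH(host="127.0.0.1", username="...",
#                         password="...", port=2222)
#     ssh.wait_for_host_up()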
def test_nvmeerror_inject(self):
    self.assertIsNotNone(ssh, "Can't connect node by ssh")
    # "dev-nvme-0" is the first NVME dev.
    dev_sn = conf["compute"]["storage_backend"][1]["serial"]
    dev = self.get_nvme_dev(dev_sn)
    self.assertIsNotNone(dev,
                         "Can't find nvme device for sn {}".format(dev_sn))
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_error_inject = {
        "execute": "nvme-status-code-error-inject",
        "arguments": {
            "count": 65536,
            "opcode": "rw",
            "id": "dev-nvme-0",
            "nsid": 1,
            "status_field": {
                "dnr": True,
                "more": True
            }
        }
    }
    payload_nvmeclear = {
        'execute': 'nvme-status-code-error-inject',
        'arguments': {
            'count': 0,
            'opcode': 'rw',
            'id': 'dev-nvme-0',
            'nsid': 0,
            'status_field': {
                'sc': 0,
                'sct': 0,
                'dnr': True,
                'more': True
            }
        }
    }
    # Redirect stderr to stdout since 'Success' is sometimes printed
    # to stderr.
    cmd = "nvme read {} -z 3008 -a 128 2>&1".format(dev)
    for cmd_error_inject in error_inject_list:
        payload_error_inject['arguments']['status_field']['sc'] = \
            cmd_error_inject[0]
        payload_error_inject['arguments']['status_field']['sct'] = \
            cmd_error_inject[1]
        s.send(json.dumps(payload_error_inject))
        s.recv()
        output = helper.ssh_exec(ssh, cmd)
        self.assertNotIn("Success", output,
                         "error of %s inject failed" % cmd_error_inject[2])
        s.send(json.dumps(payload_nvmeclear))
        s.recv()
        output = helper.ssh_exec(ssh, cmd)
        self.assertIn("Success", output, "clear error failed")
    s.close()
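# `helper.ssh_exec` is used above but not defined in this section. Judging
# from the call site it runs a command through the sshclient wrapper and
# returns only stdout; a minimal sketch under that assumption:
def ssh_exec(ssh, cmd):
    # Drop the exit status; callers here only grep the command output.
    status, stdout = ssh.exec_command(cmd)
    return stdout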
def start_node(node_type):
    """
    Create two drives for comparison: the first drive has an additional
    page file, the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh
    global s

    # create an empty image for test.
    os.system("touch {0}".format(test_drive_image))
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    conf["compute"]["networks"][0]["port_forward"] = [{
        "outside": 2222,
        "inside": 22,
        "protocal": "tcp"
    }]
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
        {
            "type": "megasas",
            "max_drive_per_controller": 16,
            "drives": [
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "01234567",
                    "version": "M001",
                    "wwn": "0x5000C500852E2971",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 0,
                    "slot_number": 0,
                    "page-file": page_file
                },
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "12345678",
                    "version": "M001",
                    "wwn": "0x5000C500852E3141",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 1,
                    "slot_number": 1
                }
            ]
        }
    ]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()

    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()

    # wait until system is ready for ssh.
    ssh = sshclient.SSH(host="127.0.0.1", username="******",
                        password="******", port=2222)
    ssh.wait_for_host_up()
def start_node(node_type):
    """
    Create two drives for comparison: the first drive has an additional
    page file, the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh

    # create an empty image for test.
    os.system("touch {0}".format(test_drive_image))
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": test_img_file}]
        },
        {
            "type": "megasas",
            "max_drive_per_controller": 16,
            "drives": [
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "01234567",
                    "version": "M001",
                    "wwn": "0x5000C500852E2971",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 0,
                    "slot_number": 0,
                    "page-file": page_file
                },
                {
                    "file": test_drive_image,
                    "format": "raw",
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "12345678",
                    "version": "M001",
                    "wwn": "0x5000C500852E3141",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 1,
                    "slot_number": 1
                }
            ]
        }
    ]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    time.sleep(3)

    # Port forward from guest 22 to host 2222
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_port_forward = {
        "execute": "human-monitor-command",
        "arguments": {
            "command-line": "hostfwd_add ::2222-:22"
        }
    }
    s.send(json.dumps(payload_port_forward))
    s.recv()
    s.close()
    time.sleep(3)

    # wait until system is ready for ssh.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    paramiko.util.log_to_file("filename.log")
    helper.try_func(600, paramiko.SSHClient.connect, ssh, "127.0.0.1",
                    port=2222, username="******", password="******",
                    timeout=120)
    ssh.close()
def test_step1_nvmedrive_remove(self):
    ssh = sshclient.SSH("127.0.0.1", "root", "root", port=2222)
    assert ssh.wait_for_host_up() is True
    s = UnixSocket(path)
    s.connect()
    s.recv()

    # 1.1: remove drive
    status, stdout = ssh.exec_command('nvme list')
    self.assertIn('0400001C6BB4', stdout, "Failed: didn't find dev-nvme-1")
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    payload_drive_remove = {
        "execute": "human-monitor-command",
        "arguments": {
            "command-line": "device_del dev-nvme-1"
        }
    }
    s.send(json.dumps(payload_drive_remove))
    s.close()

    # around 10s is necessary to refresh the device list
    time.sleep(10)
    status, stdout = ssh.exec_command('nvme list')
    self.assertNotIn('0400001C6BB4', stdout, "NVME drive remove failed")
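# Note on the step naming: unittest runs test methods in alphabetical order
# by default (TestLoader.sortTestMethodsUsing), so test_step1_nvmedrive_remove
# is guaranteed to run before test_step2_nvmedrive_insert defined earlier in
# this section, which hot-adds dev-nvme-1 back after this removal.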