def test_controller_with_drive6(self):
    """Verify a megasas controller exposes all six attached drives.

    Builds a storage_backend with a six-drive megasas controller plus a
    prepended ahci boot controller, starts the node, and checks the qemu
    command line and the guest-visible storage topology.
    """
    # Update megasas controller with six drives
    self.conf["compute"]["storage_backend"] = [{
        "type": "megasas",
        "use_msi": "true",
        "max_cmds": 1024,
        "max_sge": 128,
        "max_drive_per_controller": 6,
        "drives": drive6
    }]
    # Prepend an ahci controller holding the bootable image.
    self.conf['compute']['storage_backend'].insert(
        0, {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{
                'file': fixtures.image,
                'bootindex': 1,
                'use_msi': 'true',
                'size': 8
            }]
        })
    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(self.conf, outfile, default_flow_style=False)
    # NOTE(review): config is dumped to /tmp/test.yml but registered from
    # tmp_conf_file -- presumably the same path; confirm.
    os.system("infrasim config add test {}".format(tmp_conf_file))
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    controller_type = run_command(
        "infrasim node info {} | grep -c megasas".format(
            self.conf["name"]))
    self.assertEqual(int(controller_type[1]), 1)
    qemu_pid = get_qemu_pid(node)
    # Fix: use a context manager so the /proc handle is not leaked.
    with open("/proc/{}/cmdline".format(qemu_pid)) as cmdline_file:
        qemu_cmdline = cmdline_file.read().replace("\x00", " ")
    assert "qemu-system-x86_64" in qemu_cmdline
    assert "/tmp/sda.img" in qemu_cmdline
    assert "/tmp/sdb.img" in qemu_cmdline
    assert "/tmp/sdc.img" in qemu_cmdline
    assert "/tmp/sdd.img" in qemu_cmdline
    assert "/tmp/sde.img" in qemu_cmdline
    assert "/tmp/sdf.img" in qemu_cmdline
    assert "format=qcow2" in qemu_cmdline
    storage_list = get_storage_list()
    megasas_info = None
    for c_map in storage_list:
        if c_map.get('name') == 'megaraid_sas':
            megasas_info = c_map
            break
    assert megasas_info
    assert len(megasas_info.get('disks')) == 6
def test_controller_with_drive2(self):
    """Verify an ahci controller exposes its two attached drives.

    Rewrites the storage_backend with a two-drive ahci controller, starts
    the node, and checks the controller count and qemu command line.
    """
    # Update ahci controller with two drives
    self.conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "use_msi": "true",
        "max_cmds": 1024,
        "max_sge": 128,
        "max_drive_per_controller": 6,
        "drives": drive2
    }]
    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(self.conf, outfile, default_flow_style=False)
    os.system("infrasim config add test {}".format(tmp_conf_file))
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    controller_type = run_command(
        "infrasim node info {} | grep -c ahci".format(self.conf["name"]))
    self.assertEqual(int(controller_type[1]), 1)
    qemu_pid = get_qemu_pid(node)
    # Fix: use a context manager so the /proc handle is not leaked.
    with open("/proc/{}/cmdline".format(qemu_pid)) as cmdline_file:
        qemu_cmdline = cmdline_file.read().replace("\x00", " ")
    assert "qemu-system-x86_64" in qemu_cmdline
    assert "/tmp/sda.img" in qemu_cmdline
    assert "/tmp/sdb.img" in qemu_cmdline
    assert "format=qcow2" in qemu_cmdline
def start_node(node_type):
    """Start a *node_type* node with a single ahci boot drive and open an
    SSH session to it through the forwarded port.
    """
    global conf
    global tmp_conf_file
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    # Speed up booting by going to hdd directly
    conf["compute"]["boot"]["boot_order"] = "c"
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 8, "file": fixtures.image}]
    }]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up()
    helper.port_forward(node)
    global ssh
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    paramiko.util.log_to_file("filename.log")
    # Keep retrying the SSH connect for up to 600s while the guest boots.
    helper.try_func(600, paramiko.SSHClient.connect, ssh,
                    "127.0.0.1", port=2222, username="******",
                    password="******", timeout=300)
    # Settle time after the connection is established.
    time.sleep(5)
def test_controller_with_drive2(self):
    """Verify an ahci controller exposes its two attached drives.

    Rewrites the storage_backend with a two-drive ahci controller, starts
    the node, and checks the controller count and full qemu command line.
    """
    # Update ahci controller with two drives
    self.conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "use_msi": "true",
        "max_cmds": 1024,
        "max_sge": 128,
        "max_drive_per_controller": 6,
        "drives": drive2
    }]
    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(self.conf, outfile, default_flow_style=False)
    os.system("infrasim config add test {}".format(tmp_conf_file))
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    controller_type = run_command("infrasim node info {} | grep -c ahci".
                                  format(self.conf["name"]))
    self.assertEqual(int(controller_type[1]), 1)
    qemu_pid = get_qemu_pid(node)
    # Fix: use a context manager so the /proc handle is not leaked.
    with open("/proc/{}/cmdline".format(qemu_pid)) as cmdline_file:
        qemu_cmdline = cmdline_file.read().replace("\x00", " ")
    qemu_cmdline = helper.get_full_qemu_cmd(qemu_cmdline)
    assert "qemu-system-x86_64" in qemu_cmdline
    assert "/tmp/sda.img" in qemu_cmdline
    assert "/tmp/sdb.img" in qemu_cmdline
    assert "format=qcow2" in qemu_cmdline
def test_controller_with_drive6(self):
    """Verify an lsi controller exposes all six attached drives.

    Builds a storage_backend with a six-drive lsi controller plus a
    prepended ahci boot controller, starts the node, and checks the qemu
    command line and the guest-visible storage topology.
    """
    # Update lsi controller with six drives
    self.conf["compute"]["storage_backend"] = [{
        "type": "lsi",
        "use_msi": "true",
        "max_cmds": 1024,
        "max_sge": 128,
        "max_drive_per_controller": 6,
        "drives": drive6
    }]
    # Prepend an ahci controller holding the bootable image.
    self.conf['compute']['storage_backend'].insert(0, {
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [
            {
                'file': fixtures.image,
                'bootindex': 1,
                'use_msi': 'true',
                'size': 8
            }
        ]
    })
    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(self.conf, outfile, default_flow_style=False)
    os.system("infrasim config add test {}".format(tmp_conf_file))
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    controller_type = run_command("infrasim node info {} | grep -c lsi".
                                  format(self.conf["name"]))
    self.assertEqual(int(controller_type[1]), 1)
    qemu_pid = get_qemu_pid(node)
    # Fix: use a context manager so the /proc handle is not leaked.
    with open("/proc/{}/cmdline".format(qemu_pid)) as cmdline_file:
        qemu_cmdline = cmdline_file.read().replace("\x00", " ")
    qemu_cmdline = helper.get_full_qemu_cmd(qemu_cmdline)
    assert "qemu-system-x86_64" in qemu_cmdline
    assert "/tmp/sda.img" in qemu_cmdline
    assert "/tmp/sdb.img" in qemu_cmdline
    assert "/tmp/sdc.img" in qemu_cmdline
    assert "/tmp/sdd.img" in qemu_cmdline
    assert "/tmp/sde.img" in qemu_cmdline
    assert "/tmp/sdf.img" in qemu_cmdline
    assert "format=qcow2" in qemu_cmdline
    storage_list = get_storage_list()
    lsi_info = None
    for c_map in storage_list:
        if c_map.get('name') == 'sym53c8xx':
            lsi_info = c_map
            break
    assert lsi_info
    assert len(lsi_info.get('disks')) == 6
def start_node(conf):
    """Bring up the node described by *conf* and attach a module-level
    SSH client to it via the forwarded port.
    """
    global ssh
    instance = model.CNode(conf)
    instance.init()
    instance.precheck()
    instance.start()
    helper.port_forward(instance)
    ssh = helper.prepare_ssh("127.0.0.1", 2222, "root", "root")
def start_node_directly():
    """Boot a node with an ahci boot drive plus an lsisas3008 controller
    whose two SAS drives share one raw backing image (share-rw), then
    open a module-level SSH session.
    """
    global conf
    global tmp_conf_file
    global ssh
    # Create the (empty) raw image both SAS drives attach to.
    os.system("touch {0}".format(test_drive_directly_image))
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    # Boot straight from disk to speed up startup.
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{
            "size": 8,
            "file": fixtures.image
        }]
    }, {
        "type": "lsisas3008",
        "max_drive_per_controller": 32,
        "drives": [{
            "file": test_drive_directly_image,
            "format": "raw",
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": "01234567",
            "version": "M001",
            "wwn": "0x5000C500852E2971",
            "share-rw": "true",
            "cache": "none",
            "scsi-id": 0,
            "slot_number": 0
        }, {
            "file": test_drive_directly_image,
            "format": "raw",
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": "12345678",
            "version": "M001",
            "wwn": "0x5000C500852E3141",
            "share-rw": "true",
            "cache": "none",
            "scsi-id": 1,
            "slot_number": 1
        }]
    }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    ssh = helper.prepare_ssh()
def start_node():
    """Start a node built from the canned NVMe fixture configuration."""
    global conf
    conf = fixtures.NvmeConfig().get_node_info()
    nvme_node = model.CNode(conf)
    nvme_node.init()
    nvme_node.precheck()
    nvme_node.start()
    # Brief pause so qemu is up before the port forward is installed.
    time.sleep(3)
    helper.port_forward(nvme_node)
def start_node():
    """Start a node built from the canned NVMe fixture configuration."""
    global conf
    conf = fixtures.NvmeConfig().get_node_info()
    nvme_node = model.CNode(conf)
    nvme_node.init()
    nvme_node.precheck()
    nvme_node.start()
    # Longer pause so qemu is fully up before the port forward is set.
    time.sleep(10)
    helper.port_forward(nvme_node)
def start_node():
    """Start an NVMe fixture node and remember its monitor socket path."""
    global conf
    global path
    conf = fixtures.NvmeConfig().get_node_info()
    nvme_node = model.CNode(conf)
    nvme_node.init()
    nvme_node.precheck()
    nvme_node.start()
    helper.port_forward(nvme_node)
    # ".monitor" is the QEMU monitor unix socket inside the workspace.
    workspace_dir = nvme_node.workspace.get_workspace()
    path = os.path.join(workspace_dir, ".monitor")
def start_node():
    """Boot an NVMe fixture node reconfigured with an lsi controller
    (SATADOM boot drive plus a 520-byte-sector SAS drive) and a NAT
    e1000 NIC; record the workspace monitor socket path.
    """
    global conf
    global path
    nvme_config = fixtures.NvmeConfig()
    conf = nvme_config.get_node_info()
    conf["compute"]["boot"] = {
        "boot_order": "c"
    }
    conf["compute"]["memory"] = {
        "size": 4096
    }
    conf["compute"]["storage_backend"] = [
        {
            "type": "lsi",
            "max_drive_per_controller": 6,
            "drives": [
                {
                    "size": 10,
                    "model": "SATADOM",
                    "serial": "20160518AA851134100",
                    "file": fixtures.image
                },
                {
                    "format": "raw",
                    "size": 1,
                    "vendor": "SEAGATE",
                    "product": "ST4000NM0005",
                    "serial": "01234567",
                    "version": "M001",
                    "wwn": "0x5000C500852E2971",
                    "share-rw": "true",
                    "cache": "none",
                    "scsi-id": 1,
                    "slot_number": 0,
                    # Non-standard sector size exercises the 520-byte path.
                    "sector_size": 520,
                    "file": drive_test_image
                }
            ]
        }]
    conf["compute"]["networks"] = [
        {
            "bus": "pcie.0",
            "device": "e1000",
            "mac": "52:54:be:b9:77:dd",
            "network_mode": "nat",
            "network_name": "dummy0",
        }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up(timeout=20)
    helper.port_forward(node)
    # ".monitor" is the QEMU monitor unix socket inside the workspace.
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
def start_node():
    """Start an NVMe fixture node, record its monitor socket path, and
    open a module-level SSH session.
    """
    global conf
    global path
    global ssh
    conf = fixtures.NvmeConfig().get_node_info()
    nvme_node = model.CNode(conf)
    nvme_node.init()
    nvme_node.precheck()
    nvme_node.start()
    helper.port_forward(nvme_node)
    # ".monitor" is the QEMU monitor unix socket inside the workspace.
    workspace_dir = nvme_node.workspace.get_workspace()
    path = os.path.join(workspace_dir, ".monitor")
    ssh = helper.prepare_ssh("127.0.0.1")
def start_node():
    """Start an NVMe fixture node, wait for boot, and verify SSH access."""
    global conf
    global ssh
    conf = fixtures.NvmeConfig().get_node_info()
    nvme_node = model.CNode(conf)
    nvme_node.init()
    nvme_node.precheck()
    nvme_node.start()
    nvme_node.wait_node_up()
    helper.port_forward(nvme_node)
    ssh = sshclient.SSH("127.0.0.1", "root", "root", port=2222)
    # The node is only usable once the guest accepts SSH logins.
    assert ssh.wait_for_host_up() is True
def start_node():
    """Boot an NVMe fixture node reconfigured with an lsi controller
    (SATADOM boot drive plus a 520-byte-sector SAS drive) and a NAT
    e1000 NIC; record the workspace monitor socket path.
    """
    global conf
    global path
    nvme_config = fixtures.NvmeConfig()
    conf = nvme_config.get_node_info()
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["memory"] = {"size": 4096}
    conf["compute"]["storage_backend"] = [{
        "type": "lsi",
        "max_drive_per_controller": 6,
        "drives": [{
            "size": 10,
            "model": "SATADOM",
            "serial": "20160518AA851134100",
            "file": fixtures.image
        }, {
            "format": "raw",
            "size": 1,
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": "01234567",
            "version": "M001",
            "wwn": "0x5000C500852E2971",
            "share-rw": "true",
            "cache": "none",
            "scsi-id": 1,
            "slot_number": 0,
            # Non-standard sector size exercises the 520-byte path.
            "sector_size": 520,
            "file": drive_test_image
        }]
    }]
    conf["compute"]["networks"] = [{
        "bus": "pcie.0",
        "device": "e1000",
        "mac": "52:54:be:b9:77:dd",
        "network_mode": "nat",
        "network_name": "dummy0",
    }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up(timeout=20)
    helper.port_forward(node)
    # ".monitor" is the QEMU monitor unix socket inside the workspace.
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
def start_node(self):
    """Boot a node with an ahci controller (boot drive + five SATA
    drives) and a megasas-gen2 controller (six SAS drives).

    The serials bound to the boot, SATA, and SAS drives let tests locate
    specific drives inside the guest.
    """
    global conf
    global sas_drive_serial
    global sata_drive_serial
    global boot_drive_serial
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf['compute']['storage_backend'] = [{
        'type': 'ahci',
        'max_drive_per_controller': 6,
        'drives': [
            {'size': 8, 'file': test_img_file, 'boot_index': 1,
             'serial': boot_drive_serial},
            {'size': 4, 'file': '/tmp/sdb.img', 'format': 'raw',
             'serial': sata_drive_serial},
            {'size': 8, 'file': '/tmp/sdc.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sdd.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sde.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sdf.img', 'format': 'raw'}
        ]
    }, {
        'type': 'megasas-gen2',
        'max_drive_per_controller': 6,
        'drives': [
            {'size': 4, 'file': '/tmp/sdg.img', 'format': 'raw',
             'serial': sas_drive_serial},
            {'size': 8, 'file': '/tmp/sdh.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sdi.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sdj.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sdk.img', 'format': 'raw'},
            {'size': 8, 'file': '/tmp/sdl.img', 'format': 'raw'}]
    }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    # Give qemu a moment to come up before forwarding the SSH port.
    time.sleep(3)
    helper.port_forward(node)
def test_four_controllers_each_with_six_drives(self):
    """Configure four controllers (2x ahci, 1x megasas, 1x lsi) with six
    drives each and verify controller counts and the guest-visible
    storage topology.
    """
    image_path = "{}/{}".format(config.infrasim_home, self.conf["name"])
    # Add several storage controllers/drives in node config file.
    drives = []
    drives.append({'size': 16, 'file': "{}/sdb.img".format(image_path)})
    drives.append({'size': 8, 'file': "{}/sdc.img".format(image_path)})
    drives.append({'size': 16, 'file': "{}/sdd.img".format(image_path)})
    drives.append({'size': 8, 'file': "{}/sde.img".format(image_path)})
    drives.append({'size': 16, 'file': "{}/sdf.img".format(image_path)})
    self.conf['compute']['storage_backend'][0]['drives'].extend(drives)
    # The first drive on the existing controller becomes the boot image.
    self.conf['compute']['storage_backend'][0]['drives'][0]['file'] = fixtures.image
    controllers = []
    controllers.append({
        'type': 'megasas',
        'use_msi': 'true',
        'max_cmds': 1024,
        'max_sge': 128,
        'drives': [],
        'max_drive_per_controller': 6
    })
    controllers.append({
        'type': 'lsi',
        'use_msi': 'true',
        'max_cmds': 1024,
        'max_sge': 128,
        'drives': [],
        'max_drive_per_controller': 6
    })
    controllers.append({
        'type': 'ahci',
        'use_msi': 'true',
        'max_cmds': 1024,
        'max_sge': 128,
        'drives': [],
        'max_drive_per_controller': 6
    })
    self.conf['compute']['storage_backend'].extend(controllers)
    # Six drives for the megasas controller (backend index 1).
    drives1 = []
    drives1.append({'size': 8, 'file': "{}/sdg.img".format(image_path)})
    drives1.append({'size': 16, 'file': "{}/sdh.img".format(image_path)})
    drives1.append({'size': 8, 'file': "{}/sdi.img".format(image_path)})
    drives1.append({'size': 16, 'file': "{}/sdj.img".format(image_path)})
    drives1.append({'size': 8, 'file': "{}/sdk.img".format(image_path)})
    drives1.append({'size': 16, 'file': "{}/sdl.img".format(image_path)})
    self.conf['compute']['storage_backend'][1]['drives'].extend(drives1)
    # Six drives for the lsi controller (backend index 2).
    drives2 = []
    drives2.append({'size': 8, 'file': "{}/sdm.img".format(image_path)})
    drives2.append({'size': 16, 'file': "{}/sdn.img".format(image_path)})
    drives2.append({'size': 8, 'file': "{}/sdo.img".format(image_path)})
    drives2.append({'size': 16, 'file': "{}/sdp.img".format(image_path)})
    drives2.append({'size': 8, 'file': "{}/sdq.img".format(image_path)})
    drives2.append({'size': 16, 'file': "{}/sdr.img".format(image_path)})
    self.conf['compute']['storage_backend'][2]['drives'].extend(drives2)
    # Six drives for the second ahci controller (backend index 3).
    drives3 = []
    drives3.append({'size': 8, 'file': "{}/sds.img".format(image_path)})
    drives3.append({'size': 16, 'file': "{}/sdt.img".format(image_path)})
    drives3.append({'size': 8, 'file': "{}/sdu.img".format(image_path)})
    drives3.append({'size': 16, 'file': "{}/sdv.img".format(image_path)})
    drives3.append({'size': 8, 'file': "{}/sdw.img".format(image_path)})
    drives3.append({'size': 16, 'file': "{}/sdx.img".format(image_path)})
    self.conf['compute']['storage_backend'][3]['drives'].extend(drives3)
    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(self.conf, outfile, default_flow_style=False)
    os.system("infrasim config add {} {}".format(self.conf["name"],
                                                 tmp_conf_file))
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    controller_type_ahci = run_command("infrasim node info {} | grep -c ahci".
                                       format(self.conf["name"]))
    controller_type_megasas = run_command("infrasim node info {} | grep -c megasas".
                                          format(self.conf["name"]))
    controller_type_lsi = run_command("infrasim node info {} | grep -c lsi".
                                      format(self.conf["name"]))
    self.assertEqual(int(controller_type_ahci[1]), 2)
    self.assertEqual(int(controller_type_megasas[1]), 1)
    self.assertEqual(int(controller_type_lsi[1]), 1)
    qemu_pid = get_qemu_pid(node)
    # NOTE(review): file handle is opened without being closed -- consider
    # a 'with' block.
    qemu_cmdline = open("/proc/{}/cmdline".format(qemu_pid)).read().replace("\x00", " ")
    qemu_cmdline = helper.get_full_qemu_cmd(qemu_cmdline)
    assert "qemu-system-x86_64" in qemu_cmdline
    assert "format=qcow2" in qemu_cmdline
    storage_list = get_storage_list()
    # Only has three types of controller
    assert len(storage_list) == 3
    for c_map in storage_list:
        if c_map.get('name') == 'ahci':
            assert len(c_map.get('buses')) == 2 * 6  # 2 controllers * 6 ports per controller
            assert len(c_map.get('disks')) == 12
        elif c_map.get('name') == 'megaraid_sas' or c_map.get('name') == 'sym53c8xx':
            assert len(c_map.get('buses')) == 1
            assert len(c_map.get('disks')) == 6
        else:
            assert False
def start_node(node_type):
    """Boot a node with an ahci boot drive, an lsisas3008 controller, and
    a disk_array enclosure (8 repeated SAS drives behind two expanders,
    lcc-a and lcc-b), then open a module-level SSH session.

    Create two drives for comparison: the first drive has an additional
    page, the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    # Boot straight from disk to speed up startup.
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{
            "size": 8,
            "file": test_img_file
        }]
    }, {
        "type": "lsisas3008",
        "max_drive_per_controller": 32,
        # Two connectors attach the HBA to both sides of the enclosure.
        "connectors": [{
            "phy": 0,
            "wwn": 5764824129059291136,
            "atta_enclosure": "enclosure_0",
            "atta_exp": "lcc-a",
            "atta_port": 0
        }, {
            "phy": 4,
            "wwn": 5764824129059291137,
            "atta_enclosure": "enclosure_0",
            "atta_exp": "lcc-b",
            "atta_port": 0
        }]
    }, {
        "type": "disk_array",
        "disk_array": [{
            "enclosure": {
                "type": 28,
                # "repeat" expands this template into 8 drives; "{}" in
                # file/serial is filled per drive.
                "drives": [{
                    "repeat": 8,
                    "start_phy_id": 12,
                    "format": "raw",
                    "share-rw": "true",
                    "version": "B29C",
                    "file": "/tmp/topo/sda{}.img",
                    "slot_number": 0,
                    "serial": "ZABCD{}",
                    "wwn": wwn_drv
                }],
                "expanders": [{
                    "phy_count": 36,
                    "wwn": wwn_exp0,
                    "ports": [{
                        "phy": 0,
                        "id": 0,
                        "number": 4
                    }, {
                        "phy": 4,
                        "id": 1,
                        "number": 4
                    }],
                    "side": 0,
                    "name": "lcc-a",
                    "ses": {
                        "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                    }
                }, {
                    "phy_count": 36,
                    "wwn": wwn_exp1,
                    "ports": [{
                        "phy": 0,
                        "id": 0,
                        "number": 4
                    }, {
                        "phy": 4,
                        "id": 1,
                        "number": 4
                    }],
                    "side": 1,
                    "name": "lcc-b",
                    "ses": {
                        "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                    }
                }]
            },
            "name": "enclosure_0"
        }]
    }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    ssh = helper.prepare_ssh()
def start_node(node_type):
    """Boot a *node_type* node with an ahci boot drive and a megasas
    controller carrying two SAS drives backed by the same raw image;
    open a QMP session on the monitor socket and wait for guest SSH.

    Create two drives for comparison: the first drive has an additional
    page (page-file), the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh
    global s
    # create a empty image for test.
    os.system("touch {0}".format(test_drive_image))
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{
            "size": 8,
            "file": fixtures.image
        }]
    }, {
        "type": "megasas",
        "max_drive_per_controller": 16,
        "drives": [{
            "file": test_drive_image,
            "format": "raw",
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": "01234567",
            "version": "M001",
            "wwn": "0x5000C500852E2971",
            "share-rw": "true",
            "cache": "none",
            "scsi-id": 0,
            "slot_number": 0,
            # Extra mode-page data only on the first drive.
            "page-file": page_file
        }, {
            "file": test_drive_image,
            "format": "raw",
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": "12345678",
            "version": "M001",
            "wwn": "0x5000C500852E3141",
            "share-rw": "true",
            "cache": "none",
            "scsi-id": 1,
            "slot_number": 1
        }]
    }]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    # Open the QEMU monitor socket and negotiate QMP capabilities.
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()
    # wait until system is ready for ssh.
    ssh = sshclient.SSH(host="127.0.0.1", username="******",
                        password="******", port=2222)
    ssh.wait_for_host_up()
def test_file_existance_after_node_restart(self):
    """Write a 512-byte block to the SAS drive, restart the node, read it
    back, and check the content survived the restart.
    """
    global conf
    # Write disk
    ssh = helper.prepare_ssh()
    stdin, stdout, stderr = ssh.exec_command('touch /root/source.bin')
    while not stdout.channel.exit_status_ready():
        pass
    stdin, stdout, stderr = ssh.exec_command(
        "echo 'Test message is found! :D' >> /root/source.bin")
    while not stdout.channel.exit_status_ready():
        pass
    # FIXME: close ssh is walk around to issue of ssh connection go inactive
    # which seems like a paramiko issue? So as other ssh.close() in file.
    ssh.close()
    # FIXME
    drive = self.get_drive(sas_drive_serial)
    ssh = helper.prepare_ssh()
    stdin, stdout, stderr = ssh.exec_command(
        'dd if=/root/source.bin of=/dev/' + drive +
        ' bs=512 seek=0 count=1 conv=fsync')
    while not stdout.channel.exit_status_ready():
        pass
    ssh.close()
    # Check disk content intact after node restart
    run_command("infrasim node restart {}".format(conf["name"]))
    node = model.CNode(conf)
    node.init()
    helper.port_forward(node)
    ssh = helper.prepare_ssh()
    stdin, stdout, stderr = ssh.exec_command('touch /root/source.bin')
    while not stdout.channel.exit_status_ready():
        pass
    stdin, stdout, stderr = ssh.exec_command(
        "echo 'Test message is found! :D' >> /root/source.bin")
    while not stdout.channel.exit_status_ready():
        pass
    stdin, stdout, stderr = ssh.exec_command(
        'dd if=/dev/' + drive +
        ' of=/root/target.bin bs=512 skip=0 count=1 conv=fsync')
    while not stdout.channel.exit_status_ready():
        pass
    ssh.close()
    ssh = helper.prepare_ssh()
    stdin, stdout, stderr = ssh.exec_command(
        'diff /root/source.bin /root/target.bin -B')
    while not stdout.channel.exit_status_ready():
        pass
    lines = stdout.channel.recv(2048)
    print('Expect lines="", Actual lines="{}"'.format(lines))
    # Fix: compare by value, not identity -- "lines is ''" depended on
    # CPython string interning and is never a correct emptiness check.
    # NOTE(review): recv() may return bytes on Python 3 -- confirm the
    # runtime if this ever compares unequal types.
    assert lines == ''
    ssh.close()
    ssh = helper.prepare_ssh()
    stdin, stdout, stderr = ssh.exec_command('rm /root/target.bin')
    while not stdout.channel.exit_status_ready():
        pass
    stdin, stdout, stderr = ssh.exec_command('ls /root')
    while not stdout.channel.exit_status_ready():
        pass
    lines = stdout.channel.recv(2048)
    assert 'target.bin' not in lines
    stdin, stdout, stderr = ssh.exec_command('rm /root/source.bin')
    while not stdout.channel.exit_status_ready():
        pass
    stdin, stdout, stderr = ssh.exec_command('ls /root')
    while not stdout.channel.exit_status_ready():
        pass
    lines = stdout.channel.recv(2048)
    ssh.close()
    assert 'source.bin' not in lines
def start_node(self):
    """Boot a node with an ahci controller (boot drive + five SATA
    drives) and a megasas-gen2 controller (six SAS drives).

    The serials bound to the boot, SATA, and SAS drives let tests locate
    specific drives inside the guest.
    """
    global conf
    global sas_drive_serial
    global sata_drive_serial
    global boot_drive_serial
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf['compute']['storage_backend'] = [{
        'type': 'ahci',
        'max_drive_per_controller': 6,
        'drives': [{
            'size': 8,
            'file': test_img_file,
            'boot_index': 1,
            'serial': boot_drive_serial
        }, {
            'size': 4,
            'file': '/tmp/sdb.img',
            'format': 'raw',
            'serial': sata_drive_serial
        }, {
            'size': 8,
            'file': '/tmp/sdc.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sdd.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sde.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sdf.img',
            'format': 'raw'
        }]
    }, {
        'type': 'megasas-gen2',
        'max_drive_per_controller': 6,
        'drives': [{
            'size': 4,
            'file': '/tmp/sdg.img',
            'format': 'raw',
            'serial': sas_drive_serial
        }, {
            'size': 8,
            'file': '/tmp/sdh.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sdi.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sdj.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sdk.img',
            'format': 'raw'
        }, {
            'size': 8,
            'file': '/tmp/sdl.img',
            'format': 'raw'
        }]
    }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    # Give qemu a moment to come up before forwarding the SSH port.
    time.sleep(3)
    helper.port_forward(node)
def start_node_enclosure():
    """Boot a node with a daisy-chained two-enclosure SAS topology.

    The lsisas3008 HBA attaches to enclosure_0 via both expanders
    (lcc-a/lcc-b); enclosure_1 is cascaded from enclosure_0 through the
    "connections" links. Opens a module-level SSH session when up.
    """
    global ssh
    global conf
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    # Boot straight from disk to speed up startup.
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{
            "size": 8,
            "file": fixtures.image
        }]
    }, {
        "type": "lsisas3008",
        "sas_address": 5764824129059291136,
        "max_drive_per_controller": 32,
        "connectors": [{
            "phy": 0,
            "wwn": 5764824129059291136,
            "atta_enclosure": "enclosure_0",
            "atta_exp": "lcc-a",
            "atta_port": 0
        }, {
            "phy": 4,
            "wwn": 5764824129059291137,
            "atta_enclosure": "enclosure_0",
            "atta_exp": "lcc-b",
            "atta_port": 0
        }]
    }, {
        "type": "disk_array",
        "disk_array": [{
            "enclosure": {
                "type": 28,
                # "repeat" expands the template into drv_count drives;
                # "{}" in file/serial is filled per drive.
                "drives": [{
                    "repeat": drv_count,
                    "start_phy_id": 12,
                    "format": "raw",
                    "share-rw": "true",
                    "version": "B29C",
                    "file": "/tmp/topo/sda{}.img",
                    "slot_number": 0,
                    "serial": "ZABCD{}",
                    "wwn": wwn_drv
                }],
                "expanders": [{
                    "phy_count": 36,
                    "wwn": wwn_exp0,
                    # Custom phy remap only on this expander.
                    "phy_map": "35-10,8,9",
                    "ports": [{
                        "phy": 0,
                        "id": 0,
                        "number": 4
                    }, {
                        "phy": 4,
                        "id": 1,
                        "number": 4
                    }],
                    "side": 0,
                    "name": "lcc-a",
                    "ses": {
                        "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                    }
                }, {
                    "phy_count": 36,
                    "wwn": wwn_exp1,
                    "ports": [{
                        "phy": 0,
                        "id": 0,
                        "number": 4
                    }, {
                        "phy": 4,
                        "id": 1,
                        "number": 4
                    }],
                    "side": 1,
                    "name": "lcc-b",
                    "ses": {
                        "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                    }
                }]
            },
            "name": "enclosure_0"
        }, {
            "enclosure": {
                "type": 28,
                "drives": [{
                    "repeat": drv1_count,
                    "start_phy_id": 12,
                    "format": "raw",
                    "share-rw": "true",
                    "version": "B29C",
                    "file": "/tmp/topo/sdb{}.img",
                    "slot_number": 0,
                    "serial": "ZABCE{}",
                    "wwn": wwn_drv1
                }],
                "expanders": [{
                    "phy_count": 36,
                    "wwn": wwn_exp2,
                    "ports": [{
                        "phy": 0,
                        "id": 0,
                        "number": 4
                    }, {
                        "phy": 4,
                        "id": 1,
                        "number": 4
                    }],
                    "side": 0,
                    "name": "lcc-a",
                    "ses": {
                        "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                    }
                }, {
                    "phy_count": 36,
                    "wwn": wwn_exp3,
                    "ports": [{
                        "phy": 0,
                        "id": 0,
                        "number": 4
                    }, {
                        "phy": 4,
                        "id": 1,
                        "number": 4
                    }],
                    "side": 1,
                    "name": "lcc-b",
                    "ses": {
                        "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                    }
                }]
            },
            "name": "enclosure_1"
        }, {
            # Cascade enclosure_1 behind enclosure_0 on both sides.
            "connections": [{
                "link": [{
                    "disk_array": "enclosure_0",
                    "exp": "lcc-a",
                    "number": 4,
                    "phy": 4
                }, {
                    "disk_array": "enclosure_1",
                    "exp": "lcc-a",
                    "number": 4,
                    "phy": 0
                }]
            }, {
                "link": [{
                    "disk_array": "enclosure_0",
                    "exp": "lcc-b",
                    "number": 4,
                    "phy": 4
                }, {
                    "disk_array": "enclosure_1",
                    "exp": "lcc-b",
                    "number": 4,
                    "phy": 0
                }]
            }]
        }]
    }]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    ssh = helper.prepare_ssh()
def set_port_forward_try_ssh(node):
    """Forward the node's SSH port and probe that a connection succeeds."""
    helper.port_forward(node)
    probe = helper.prepare_ssh()
    probe.close()
    # Settle time after the probe connection is torn down.
    time.sleep(5)
def start_node(node_type):
    """Boot a node with an ahci boot drive, an lsisas3008 controller, and
    a disk_array enclosure (8 repeated SAS drives behind two expanders,
    lcc-a and lcc-b), then open a module-level SSH session.

    Create two drives for comparison: the first drive has an additional
    page, the second doesn't.
    """
    global conf
    global tmp_conf_file
    global ssh
    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    # Boot straight from disk to speed up startup.
    conf["compute"]["boot"] = {
        "boot_order": "c"
    }
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [
                {
                    "size": 8,
                    "file": test_img_file
                }
            ]
        },
        {
            "type": "lsisas3008",
            "max_drive_per_controller": 32,
            # Two connectors attach the HBA to both sides of the enclosure.
            "connectors": [
                {
                    "phy": 0,
                    "wwn": 5764824129059291136,
                    "atta_enclosure": "enclosure_0",
                    "atta_exp": "lcc-a",
                    "atta_port": 0
                },
                {
                    "phy": 4,
                    "wwn": 5764824129059291137,
                    "atta_enclosure": "enclosure_0",
                    "atta_exp": "lcc-b",
                    "atta_port": 0
                }
            ]
        },
        {
            "type": "disk_array",
            "disk_array": [
                {
                    "enclosure": {
                        "type": 28,
                        # "repeat" expands this template into 8 drives;
                        # "{}" in file/serial is filled per drive.
                        "drives": [
                            {
                                "repeat": 8,
                                "start_phy_id": 12,
                                "format": "raw",
                                "share-rw": "true",
                                "version": "B29C",
                                "file": "/tmp/topo/sda{}.img",
                                "slot_number": 0,
                                "serial": "ZABCD{}",
                                "wwn": wwn_drv
                            }
                        ],
                        "expanders": [
                            {
                                "phy_count": 36,
                                "wwn": wwn_exp0,
                                "ports": [
                                    {
                                        "phy": 0,
                                        "id": 0,
                                        "number": 4
                                    },
                                    {
                                        "phy": 4,
                                        "id": 1,
                                        "number": 4
                                    }
                                ],
                                "side": 0,
                                "name": "lcc-a",
                                "ses": {
                                    "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                                }
                            },
                            {
                                "phy_count": 36,
                                "wwn": wwn_exp1,
                                "ports": [
                                    {
                                        "phy": 0,
                                        "id": 0,
                                        "number": 4
                                    },
                                    {
                                        "phy": 4,
                                        "id": 1,
                                        "number": 4
                                    }
                                ],
                                "side": 1,
                                "name": "lcc-b",
                                "ses": {
                                    "buffer_data": "/home/infrasim/workspace/bins/buffer.bin"
                                }
                            }
                        ]
                    },
                    "name": "enclosure_0"
                }
            ]
        }
    ]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    ssh = helper.prepare_ssh()
def test_four_controllers_each_with_six_drives(self):
    """Configure four controllers (2x ahci, 1x megasas, 1x lsi) with six
    drives each and verify controller counts and the guest-visible
    storage topology.
    """
    image_path = "{}/{}".format(config.infrasim_home, self.conf["name"])
    # Add several storage controllers/drives in node config file.
    drives = []
    drives.append({'size': 16, 'file': "{}/sdb.img".format(image_path)})
    drives.append({'size': 8, 'file': "{}/sdc.img".format(image_path)})
    drives.append({'size': 16, 'file': "{}/sdd.img".format(image_path)})
    drives.append({'size': 8, 'file': "{}/sde.img".format(image_path)})
    drives.append({'size': 16, 'file': "{}/sdf.img".format(image_path)})
    self.conf['compute']['storage_backend'][0]['drives'].extend(drives)
    # The first drive on the existing controller becomes the boot image.
    self.conf['compute']['storage_backend'][0]['drives'][0][
        'file'] = fixtures.image
    controllers = []
    controllers.append({
        'type': 'megasas',
        'use_msi': 'true',
        'max_cmds': 1024,
        'max_sge': 128,
        'drives': [],
        'max_drive_per_controller': 6
    })
    controllers.append({
        'type': 'lsi',
        'use_msi': 'true',
        'max_cmds': 1024,
        'max_sge': 128,
        'drives': [],
        'max_drive_per_controller': 6
    })
    controllers.append({
        'type': 'ahci',
        'use_msi': 'true',
        'max_cmds': 1024,
        'max_sge': 128,
        'drives': [],
        'max_drive_per_controller': 6
    })
    self.conf['compute']['storage_backend'].extend(controllers)
    # Six drives for the megasas controller (backend index 1).
    drives1 = []
    drives1.append({'size': 8, 'file': "{}/sdg.img".format(image_path)})
    drives1.append({'size': 16, 'file': "{}/sdh.img".format(image_path)})
    drives1.append({'size': 8, 'file': "{}/sdi.img".format(image_path)})
    drives1.append({'size': 16, 'file': "{}/sdj.img".format(image_path)})
    drives1.append({'size': 8, 'file': "{}/sdk.img".format(image_path)})
    drives1.append({'size': 16, 'file': "{}/sdl.img".format(image_path)})
    self.conf['compute']['storage_backend'][1]['drives'].extend(drives1)
    # Six drives for the lsi controller (backend index 2).
    drives2 = []
    drives2.append({'size': 8, 'file': "{}/sdm.img".format(image_path)})
    drives2.append({'size': 16, 'file': "{}/sdn.img".format(image_path)})
    drives2.append({'size': 8, 'file': "{}/sdo.img".format(image_path)})
    drives2.append({'size': 16, 'file': "{}/sdp.img".format(image_path)})
    drives2.append({'size': 8, 'file': "{}/sdq.img".format(image_path)})
    drives2.append({'size': 16, 'file': "{}/sdr.img".format(image_path)})
    self.conf['compute']['storage_backend'][2]['drives'].extend(drives2)
    # Six drives for the second ahci controller (backend index 3).
    drives3 = []
    drives3.append({'size': 8, 'file': "{}/sds.img".format(image_path)})
    drives3.append({'size': 16, 'file': "{}/sdt.img".format(image_path)})
    drives3.append({'size': 8, 'file': "{}/sdu.img".format(image_path)})
    drives3.append({'size': 16, 'file': "{}/sdv.img".format(image_path)})
    drives3.append({'size': 8, 'file': "{}/sdw.img".format(image_path)})
    drives3.append({'size': 16, 'file': "{}/sdx.img".format(image_path)})
    self.conf['compute']['storage_backend'][3]['drives'].extend(drives3)
    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(self.conf, outfile, default_flow_style=False)
    os.system("infrasim config add {} {}".format(self.conf["name"],
                                                 tmp_conf_file))
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    controller_type_ahci = run_command(
        "infrasim node info {} | grep -c ahci".format(self.conf["name"]))
    controller_type_megasas = run_command(
        "infrasim node info {} | grep -c megasas".format(
            self.conf["name"]))
    controller_type_lsi = run_command(
        "infrasim node info {} | grep -c lsi".format(self.conf["name"]))
    self.assertEqual(int(controller_type_ahci[1]), 2)
    self.assertEqual(int(controller_type_megasas[1]), 1)
    self.assertEqual(int(controller_type_lsi[1]), 1)
    qemu_pid = get_qemu_pid(node)
    # NOTE(review): file handle is opened without being closed -- consider
    # a 'with' block.
    qemu_cmdline = open(
        "/proc/{}/cmdline".format(qemu_pid)).read().replace("\x00", " ")
    assert "qemu-system-x86_64" in qemu_cmdline
    assert "format=qcow2" in qemu_cmdline
    storage_list = get_storage_list()
    # Only has three types of controller
    assert len(storage_list) == 3
    for c_map in storage_list:
        if c_map.get('name') == 'ahci':
            assert len(c_map.get('buses')
                       ) == 2 * 6  # 2 controllers * 6 ports per controller
            assert len(c_map.get('disks')) == 12
        elif c_map.get('name') == 'megaraid_sas' or c_map.get(
                'name') == 'sym53c8xx':
            assert len(c_map.get('buses')) == 1
            assert len(c_map.get('disks')) == 6
        else:
            assert False
def start_node():
    """Boot an NVMe fixture node with an explicit PCIe topology: two root
    ports, each with an upstream/downstream switch; two NVMe controllers
    and the e1000 NIC hang off specific downstream ports. Records the
    workspace monitor socket path.
    """
    global conf
    global path
    nvme_config = fixtures.NvmeConfig()
    conf = nvme_config.get_node_info()
    # Boot straight from disk to speed up startup.
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{
            "size": 40,
            "model": "SATADOM",
            "serial": "20160518AA851134100",
            "file": fixtures.image
        }]
    }, {
        # NVMe controller with 2 namespaces on switch port downstream2.
        "cmb_size_mb": 1,
        "size": 128,
        "lba_index": 0,
        "namespaces": 2,
        "serial": "0400001C1FFA",
        "bus": "downstream2",
        "type": "nvme",
        "oncs": "0xf"
    }, {
        # NVMe controller with 3 namespaces on switch port downstream3.
        "cmb_size_mb": 1,
        "size": 128,
        "lba_index": 0,
        "namespaces": 3,
        "bus": "downstream3",
        "serial": "0400001C6BB4",
        "type": "nvme",
        "oncs": "0xf"
    }]
    conf["compute"]["networks"] = [{
        "bus": "downstream1",
        "device": "e1000",
        "mac": "52:54:be:b9:77:dd",
        "network_mode": "nat",
        "network_name": "dummy0"
    }]
    conf["compute"]["pcie_topology"] = {
        "root_port": [{
            "addr": "7.0",
            "bus": "pcie.0",
            "chassis": 1,
            "device": "ioh3420",
            "id": "root_port1",
            "pri_bus": 0,
            "sec_bus": 40,
            "slot": 2
        }, {
            "addr": "8.0",
            "bus": "pcie.0",
            "chassis": 1,
            "device": "ioh3420",
            "id": "root_port2",
            "pri_bus": 0,
            "sec_bus": 60,
            "slot": 3
        }],
        "switch": [{
            "downstream": [{
                "addr": "2.0",
                "bus": "upstream1",
                "chassis": 1,
                "device": "xio3130-downstream",
                "id": "downstream1",
                "slot": 190,
                "pri_bus": 41,
                "sec_bus": 42
            }, {
                "addr": "3.0",
                "bus": "upstream1",
                "chassis": 1,
                "device": "xio3130-downstream",
                "id": "downstream2",
                "slot": 160,
                "pri_bus": 41,
                "sec_bus": 43
            }],
            "upstream": [{
                "bus": "root_port1",
                "device": "x3130-upstream",
                "id": "upstream1"
            }]
        }, {
            "downstream": [{
                "addr": "2.0",
                "bus": "upstream2",
                "chassis": 1,
                "device": "xio3130-downstream",
                "id": "downstream3",
                "slot": 193,
                "pri_bus": 61,
                "sec_bus": 62
            }, {
                "addr": "3.0",
                "bus": "upstream2",
                "chassis": 1,
                "device": "xio3130-downstream",
                "id": "downstream4",
                "slot": 164,
                "pri_bus": 61,
                "sec_bus": 63
            }],
            "upstream": [{
                "bus": "root_port2",
                "device": "x3130-upstream",
                "id": "upstream2"
            }]
        }]
    }
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up(timeout=5)
    helper.port_forward(node)
    # ".monitor" is the QEMU monitor unix socket inside the workspace.
    path = os.path.join(node.workspace.get_workspace(), ".monitor")