def setUpClass(cls):
    """Bring up a node plus its ipmi-console for class-level tests.

    Dumps the node config to cls.TMP_CONF_FILE, starts the node, then
    launches ipmi-console in a daemon thread and waits for its SSH
    server to come up.
    """
    fake_config = fixtures.FakeConfig()
    node_info = fake_config.get_node_info()
    node_info["ipmi_console_port"] = 9100
    node_info["ipmi_console_ssh"] = 9400
    cls.bmc_conf = os.path.join(os.environ["HOME"], ".infrasim",
                                node_info["name"], "etc", "vbmc.conf")
    with open(cls.TMP_CONF_FILE, "w") as f:
        yaml.dump(node_info, f, default_flow_style=False)
    node = CNode(node_info)
    node.init()
    node.precheck()
    node.start()
    # Wait ipmi_sim server coming up.
    # FIXME: fixed sleep is fragile; poll the service instead.
    print("Wait ipmi-console start in about 15s...")
    time.sleep(15)
    ipmi_console_thread = threading.Thread(target=ipmiconsole.start,
                                           args=(node_info["name"],))
    # Daemonize so the console thread dies with the test process.
    ipmi_console_thread.daemon = True
    ipmi_console_thread.start()
    # Wait SSH server coming up
    # FIXME: Need a good way to check if SSH server is listening
    # on port 9300
    time.sleep(20)
def start_node(node_type):
    """Boot a node of the given type and open an SSH session to it."""
    global conf
    global tmp_conf_file
    global ssh

    conf = fixtures.FakeConfig().get_node_info()
    conf["type"] = node_type
    # Speed up booting by going to hdd directly
    conf["compute"]["boot"]["boot_order"] = "c"
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 8, "file": fixtures.image}]
    }]

    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)

    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up()
    helper.port_forward(node)

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    paramiko.util.log_to_file("filename.log")
    # Retry the SSH connect until the guest is reachable.
    helper.try_func(600, paramiko.SSHClient.connect, ssh,
                    "127.0.0.1", port=2222, username="******",
                    password="******", timeout=300)
    time.sleep(5)
def tearDownClass(cls):
    """Stop the fake-config node and remove its workspace."""
    node_info = fixtures.FakeConfig().get_node_info()
    node = model.CNode(node_info)
    node.init()
    node.stop()
    node.terminate_workspace()
def setUpClass(cls):
    """Extend the fake node config with extra storage controllers and
    drives, then register it with infrasim as node "test".
    """
    fake_config = fixtures.FakeConfig()
    cls.conf = fake_config.get_node_info()
    cls.node_root = os.path.join(config.infrasim_home, cls.conf["name"])

    # Add several storage controllers and drives to node config file.
    storage = cls.conf['compute']['storage_backend']

    # Controller 0: re-point the existing first drive at sda.img and
    # append sdb..sdf.
    storage[0]['drives'][0]['file'] = "{}/sda.img".format(cls.node_root)
    storage[0]['drives'].extend(
        {'size': 8, 'file': "{}/sd{}.img".format(cls.node_root, letter)}
        for letter in "bcdef")

    # Two more AHCI controllers with two drives each (sdg..sdj).
    storage.extend(
        {'type': 'ahci', 'drives': [], 'max_drive_per_controller': 6}
        for _ in range(2))
    storage[1]['drives'].extend(
        {'size': 8, 'file': "{}/sd{}.img".format(cls.node_root, letter)}
        for letter in "gh")
    storage[2]['drives'].extend(
        {'size': 8, 'file': "{}/sd{}.img".format(cls.node_root, letter)}
        for letter in "ij")

    with open('/tmp/test.yml', 'w') as outfile:
        yaml.dump(cls.conf, outfile, default_flow_style=False)
    os.system("infrasim config add test /tmp/test.yml")
def start_node():
    """Boot a node with custom CPU/memory/serial settings and open SSH."""
    global conf
    global ssh

    conf = fixtures.FakeConfig().get_node_info()
    # NOTE(review): "protocal" is presumably the key the code under test
    # expects — do not "fix" the spelling here.
    conf["compute"]["networks"][0]["port_forward"] = [
        {"protocal": "tcp", "inside": 22, "outside": 2222}
    ]
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["serial_number"] = "infrasim300"
    conf["compute"]["cpu"] = {"type": "Haswell", "quantities": 4}
    conf["compute"]["memory"] = {"size": 4096}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 10, "file": fixtures.image}]
    }]

    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up()

    ssh = helper.prepare_ssh()
def setUp(self):
    """Boot a node with a cdrom attached and open a SOL session."""
    self.conf = fixtures.FakeConfig().get_node_info()
    # Provide an (empty) ISO file for the virtual cdrom drive.
    os.system("touch /tmp/test.iso")
    self.conf['compute']['cdrom'] = {'file': "/tmp/test.iso"}
    self.conf['compute']['boot'] = {'boot_order': 'ncd'}

    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    node.wait_node_up()

    # Start sol in a subprocess; output is captured in /tmp/test_sol.
    self.fw = open('/tmp/test_sol', 'wb')
    self.p_sol = subprocess.Popen(
        "ipmitool -I lanplus -U admin -P admin "
        "-H 127.0.0.1 sol activate",
        shell=True,
        stdin=subprocess.PIPE,
        stdout=self.fw,
        stderr=self.fw,
        bufsize=0)
def setUp(self):
    """Boot a node with the boot menu enabled and open a SOL session."""
    self.conf = fixtures.FakeConfig().get_node_info()
    self.conf['compute']['boot'] = {
        'boot_order': 'ncd',
        'splash-time': 20000,
        'menu': 'on'
    }

    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    time.sleep(3)

    # Start sol in a subprocess; output is captured in /tmp/test_sol.
    self.fw = open('/tmp/test_sol', 'wb')
    self.p_sol = subprocess.Popen(
        "ipmitool -I lanplus -U admin -P admin "
        "-H 127.0.0.1 sol activate",
        shell=True,
        stdin=subprocess.PIPE,
        stdout=self.fw,
        stderr=self.fw,
        bufsize=0)
def setUp(self):
    """Prepare a dell_c6320 node config and prepend PYTHONPATH/bin to PATH."""
    self.conf = fixtures.FakeConfig().get_node_info()
    self.conf["type"] = "dell_c6320"
    # Keep the original PATH so it can be restored later (presumably
    # in tearDown — confirm).
    self.old_path = os.environ.get("PATH")
    os.environ["PATH"] = "{}/bin:{}".format(
        os.environ.get("PYTHONPATH"), self.old_path)
def start_node():
    """Boot a node with the QEMU guest agent enabled.

    Publishes module globals: conf (node config), s (QMP monitor
    socket), sg (guest agent socket) and ssh (SSH session).
    """
    global conf
    global ssh
    global s
    global sg

    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    # Enable the QEMU guest agent (the original set this key twice).
    conf["compute"]["guest-agent"] = True
    conf["compute"]["networks"] = [{
        "device": "e1000",
        "id": "e1000.0",
        "mac": "00:60:16:9c:50:6a"
    }]
    conf["compute"]["networks"][0]["port_forward"] = [{
        "outside": 2222,
        "inside": 22,
        "protocal": "tcp"
    }]
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
    ]
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()

    # first s : unixsocket .monitor — enable QMP capabilities.
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    payload_enable_qmp = {"execute": "qmp_capabilities"}
    s.send(json.dumps(payload_enable_qmp))
    s.recv()

    # second sg: unixsocket guest.agt — ping the guest agent.
    path_guestagt = os.path.join(node.workspace.get_workspace(),
                                 "guest.agt")
    sg = UnixSocket(path_guestagt)
    sg.connect()
    payload_test_ping = {"execute": "guest-ping"}
    sg.send(json.dumps(payload_test_ping))

    # wait until system is ready for ssh.
    ssh = sshclient.SSH(host="127.0.0.1", username="******",
                        password="******", port=2222)
    ssh.wait_for_host_up()
def setUp(self):
    """Record the test node's vbmc.conf path and put PYTHONPATH/bin on PATH."""
    self.conf = fixtures.FakeConfig().get_node_info()
    self.bmc_conf = os.path.join(os.environ["HOME"], ".infrasim",
                                 "test", "etc", "vbmc.conf")
    # Keep the original PATH so it can be restored later (presumably
    # in tearDown — confirm).
    self.old_path = os.environ.get("PATH")
    os.environ["PATH"] = "{}/bin:{}".format(
        os.environ.get("PYTHONPATH"), self.old_path)
def setUp():
    """Create a named node from the fake config and dump it to tmp_conf_file."""
    global conf
    conf = fixtures.FakeConfig().get_node_info()
    node = model.CNode(conf)
    node.set_node_name(conf['name'])
    with open(tmp_conf_file, "w") as f:
        yaml.dump(conf, f, default_flow_style=False)
def setUp(self):
    """Boot a node from the fake config before each test."""
    fake_config = fixtures.FakeConfig()
    self.node_info = fake_config.get_node_info()
    # Keep the node handle private so only this test case manages it.
    self.__node = model.CNode(self.node_info)
    self.__node.init()
    self.__node.precheck()
    self.__node.start()
def stop_node():
    """Stop the node, remove its workspace, and reset the conf global."""
    global conf
    conf = fixtures.FakeConfig().get_node_info()
    node = model.CNode(conf)
    node.init()
    node.stop()
    node.terminate_workspace()
    conf = None
def start_node_directly():
    """Boot a node with one AHCI disk plus two LSI SAS drives backed by
    the same raw image, then prepare an SSH session."""
    global conf
    global tmp_conf_file
    global ssh

    os.system("touch {0}".format(test_drive_directly_image))

    def sas_drive(serial, wwn, index):
        # Both SAS drives share the backing image; only serial, wwn and
        # slot/scsi id differ.
        return {
            "file": test_drive_directly_image,
            "format": "raw",
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": serial,
            "version": "M001",
            "wwn": wwn,
            "share-rw": "true",
            "cache": "none",
            "scsi-id": index,
            "slot_number": index
        }

    conf = fixtures.FakeConfig().get_node_info()
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 8, "file": fixtures.image}]
    }, {
        "type": "lsisas3008",
        "max_drive_per_controller": 32,
        "drives": [sas_drive("01234567", "0x5000C500852E2971", 0),
                   sas_drive("12345678", "0x5000C500852E3141", 1)]
    }]

    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    helper.port_forward(node)
    ssh = helper.prepare_ssh()
def setUp(self):
    """Boot a node and record the SOL output capture path."""
    self.conf = fixtures.FakeConfig().get_node_info()
    self.sol_outfile = "/tmp/test_sol"
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    time.sleep(3)
def setUp(self):
    """Boot a node from the fake config before each test."""
    fake_config = fixtures.FakeConfig()
    self.node_info = fake_config.get_node_info()
    node = model.CNode(self.node_info)
    node.init()
    node.precheck()
    node.start()
    # FIXME: sleep is not a good way to wait qemu starts up.
    time.sleep(3)
def setUpClass(cls):
    """Dump the fake config to TMP_CONF_FILE and start socat for the node."""
    cls.conf = fixtures.FakeConfig().get_node_info()
    cls.WORKSPACE = "{}/{}".format(config.infrasim_home, cls.conf['name'])
    with open(TMP_CONF_FILE, 'w') as f_yml:
        yaml.dump(cls.conf, f_yml, default_flow_style=False)
    cls.node = model.CNode(cls.conf)
    cls.node.set_node_name(cls.conf['name'])
    socat.start_socat(conf_file=TMP_CONF_FILE)
def setUp(self):
    """Boot node "test1" (quanta_d51) before each test."""
    self.node_info = fixtures.FakeConfig().get_node_info()
    # start node test1
    self.node_info['name'] = "test1"
    self.node_info["type"] = "quanta_d51"
    node1 = model.CNode(self.node_info)
    node1.init()
    node1.precheck()
    node1.start()
    time.sleep(2)
def start_node():
    """Boot a node with two NAT e1000 NICs (one SSH port-forwarded) and
    open its QMP monitor socket plus an SSH session."""
    global conf
    global ssh
    global s

    conf = fixtures.FakeConfig().get_node_info()
    forward_rule = {
        "outside": 2222,
        "inside": 22,
        "protocal": "tcp"
    }
    conf["compute"]["networks"] = [{
        "device": "e1000",
        "id": "e1000.0",
        "mac": "00:60:16:9c:ff:6a",
        "network_mode": "nat",
        "port_forward": [forward_rule]
    }, {
        "device": "e1000",
        "id": "e1000.1",
        "network_mode": "nat",
        "mac": target_mac,
    }]
    conf["compute"]["storage_backend"] = [
        {
            "type": "ahci",
            "max_drive_per_controller": 6,
            "drives": [{"size": 8, "file": fixtures.image}]
        },
    ]

    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()

    # first s : unixsocket .monitor — enable QMP capabilities.
    path = os.path.join(node.workspace.get_workspace(), ".monitor")
    s = UnixSocket(path)
    s.connect()
    s.recv()
    s.send(json.dumps({"execute": "qmp_capabilities"}))
    s.recv()

    ssh = prepare_ssh()
def setUpClass(cls):
    """Point PATH at the prepared path and boot node "test1"."""
    os.environ["PATH"] = new_path
    cls.node_info = fixtures.FakeConfig().get_node_info()
    # start node test1
    cls.node_info['name'] = "test1"
    node1 = model.CNode(cls.node_info)
    node1.init()
    node1.precheck()
    node1.start()
    time.sleep(2)
def setUp(self):
    """Build a node config with a single AHCI drive backed by the test image."""
    self.conf = fixtures.FakeConfig().get_node_info()
    self.conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"file": fixtures.image}]
    }]
def setUpClass(cls):
    """Initialize a node and record the path of its vbmc.conf."""
    fake_config = fixtures.FakeConfig()
    node_info = fake_config.get_node_info()
    cls.bmc_conf = os.path.join(config.infrasim_home, node_info["name"],
                                "data", "vbmc.conf")
    with open(cls.TMP_CONF_FILE, "w") as f:
        yaml.dump(node_info, f, default_flow_style=False)
    node = CNode(node_info)
    node.init()
    node.precheck()
def tearDown(self):
    """Stop and clean up both test nodes.

    "test1" is torn down from self.node_info; "test2" is rebuilt from a
    fresh fake config so it can be stopped by name.
    """
    node1 = model.CNode(self.node_info)
    node1.init()
    node1.stop()
    node1.terminate_workspace()
    self.node_info = None

    node_info_2 = fixtures.FakeConfig().get_node_info()
    node_info_2['name'] = "test2"
    node2 = model.CNode(node_info_2)
    node2.init()
    node2.stop()
    node2.terminate_workspace()
def setUp(self):
    """Initialize a one-drive AHCI node and remember its workspace path."""
    self.conf = fixtures.FakeConfig().get_node_info()
    self.conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"file": fixtures.image}]
    }]
    node = model.CNode(self.conf)
    node.init()
    self.workspace = node.workspace.get_workspace()
def get_node_directly():
    """Build and return a node config with one AHCI disk plus two LSI
    SAS drives backed by the same raw image."""
    global conf

    os.system("touch {0}".format(test_drive_directly_image))

    def sas_drive(serial, wwn, index):
        # Both SAS drives share the backing image; only serial, wwn and
        # slot/scsi id differ.
        return {
            "file": test_drive_directly_image,
            "format": "raw",
            "vendor": "SEAGATE",
            "product": "ST4000NM0005",
            "serial": serial,
            "version": "M001",
            "wwn": wwn,
            "share-rw": "true",
            "cache": "none",
            "scsi-id": index,
            "slot_number": index
        }

    conf = fixtures.FakeConfig().get_node_info()
    conf["compute"]["boot"] = {"boot_order": "c"}
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 8, "file": fixtures.image}]
    }, {
        "type": "lsisas3008",
        "max_drive_per_controller": 32,
        "drives": [sas_drive("01234567", "0x5000C500852E2971", 0),
                   sas_drive("12345678", "0x5000C500852E3141", 1)]
    }]
    return conf
def start_node(node_type):
    """Boot a node of the given type, add SSH port forwarding through
    the QEMU monitor (telnet on port 2345), and verify SSH reachability.
    """
    global conf
    global tmp_conf_file
    # Hoisted from mid-function; only this function uses telnetlib.
    import telnetlib

    fake_config = fixtures.FakeConfig()
    conf = fake_config.get_node_info()
    conf["type"] = node_type
    conf["compute"]["storage_backend"] = [{
        "type": "ahci",
        "max_drive_per_controller": 6,
        "drives": [{"size": 8, "file": test_img_file}]
    }]
    with open(tmp_conf_file, "w") as yaml_file:
        yaml.dump(conf, yaml_file, default_flow_style=False)
    node = model.CNode(conf)
    node.init()
    node.precheck()
    node.start()
    time.sleep(3)

    # Ask the QEMU monitor to forward host port 2222 to guest port 22.
    tn = telnetlib.Telnet(host="127.0.0.1", port=2345)
    tn.read_until("(qemu)")
    tn.write("hostfwd_add ::2222-:22\n")
    tn.read_until("(qemu)")
    tn.close()

    # Probe SSH until the guest answers, then drop the connection.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    paramiko.util.log_to_file("filename.log")
    helper.try_func(600, paramiko.SSHClient.connect, ssh,
                    "127.0.0.1", port=2222, username="******",
                    password="******", timeout=120)
    ssh.close()
    time.sleep(5)
def setUpClass(cls):
    """Start a node and record the path of its vbmc.conf."""
    fake_config = fixtures.FakeConfig()
    node_info = fake_config.get_node_info()
    cls.bmc_conf = os.path.join(os.environ["HOME"], ".infrasim",
                                node_info["name"], "data", "vbmc.conf")
    with open(cls.TMP_CONF_FILE, "w") as f:
        yaml.dump(node_info, f, default_flow_style=False)
    node = CNode(node_info)
    node.init()
    node.precheck()
    node.start()
    # Wait ipmi_sim start.
    time.sleep(2)
def test_start_stop_stress(self):
    """Repeatedly start and stop the node to stress lifecycle handling."""
    fake_config = fixtures.FakeConfig()
    self.node_info = fake_config.get_node_info()
    count = 5
    for _ in range(count):
        # Start the node...
        node = model.CNode(self.node_info)
        node.init()
        node.precheck()
        node.start()
        # ...then stop it again with a fresh handle and clean up.
        node = model.CNode(self.node_info)
        node.init()
        node.stop()
        node.status()
        node.terminate_workspace()
def setUp(self):
    """Boot a node and open a SOL session via ipmitool."""
    self.conf = fixtures.FakeConfig().get_node_info()
    node = model.CNode(self.conf)
    node.init()
    node.precheck()
    node.start()
    time.sleep(3)

    # Start sol in a subprocess; output is captured in /tmp/test_sol.
    self.fw = open('/tmp/test_sol', 'wb')
    self.p_sol = subprocess.Popen(
        "ipmitool -I lanplus -U admin -P admin "
        "-H 127.0.0.1 sol activate",
        shell=True,
        stdin=subprocess.PIPE,
        stdout=self.fw,
        stderr=self.fw,
        bufsize=1)
def setUpClass(cls):
    """Initialize a node whose BMC listens on loopback (LAN port 625)."""
    fake_config = fixtures.FakeConfig()
    node_info = fake_config.get_node_info()
    node_info["bmc"] = {
        "interface": "lo",
        "ipmi_over_lan_port": 625
    }
    node_info["ipmi_console_ssh"] = 9401
    node_info["ipmi_console_port"] = 9101
    cls.bmc_conf = os.path.join(config.infrasim_home, node_info["name"],
                                "data", "vbmc.conf")
    with open(cls.TMP_CONF_FILE, "w") as f:
        yaml.dump(node_info, f, default_flow_style=False)
    node = CNode(node_info)
    node.init()
    node.precheck()