def setUp(self):
    """Connect to the target host and make sure both cockpit-machines
    rpms (old and new version) exist on it, downloading any missing
    one from the configured base URL."""
    # polarion case bookkeeping
    self.case_id = None
    self.case_state = None
    self.host = Machine(os.environ.get('HOST_STRING'),
                        os.environ.get('USERNAME'),
                        os.environ.get('PASSWD'))
    self.old_ver = self.params.get('old_machines_rpm_ver')
    self.new_ver = self.params.get('new_machines_rpm_ver')
    self.base_url = self.params.get('base_url')
    for package in (self.old_ver, self.new_ver):
        present = self.host.execute(
            'test -e {}'.format(package), raise_exception=False)
        if present.succeeded:
            continue
        # derive URL fields from the rpm file name:
        # pieces after the last '-' are "<ver2>.<dist>.<arch>.rpm"
        dash_parts = package.split('-')
        dot_parts = dash_parts[-1].split('.')
        fields = {
            'ver1': dash_parts[2],
            'ver2': '.'.join([dot_parts[0], dot_parts[1]]),
            'arch': dot_parts[2],
            'name': package,
        }
        download_url = self.base_url + URL_VER.format(**fields)
        self.host.execute('wget {}'.format(download_url))
def setUp(self):
    """Load package names and base URL from ovirt_package.yml, then
    download the old and new cockpit-machines-ovirt rpms onto the
    test host."""
    config_path = self.get_data('ovirt_package.yml')
    # yaml.load() without an explicit Loader is unsafe on untrusted
    # input and the open() handle was leaked; safe_load inside a
    # context manager fixes both
    with open(config_path) as config_file:
        self.config_dict = yaml.safe_load(config_file)
    self.OLD_MACHINES_RPM_NAME = self.config_dict['old_pkg']
    self.NEW_MACHINES_RPM_NAME = self.config_dict['new_pkg']
    self.base = self.config_dict['base_url']
    host_string = os.environ.get('HOST_STRING')
    username = os.environ.get('USERNAME')
    passwd = os.environ.get('PASSWD')
    self.host = Machine(host_string, username, passwd)
    for rpm_name in [
            self.OLD_MACHINES_RPM_NAME, self.NEW_MACHINES_RPM_NAME]:
        # derive URL fields from the rpm file name; the chunk after the
        # last '-' is "<ver2>.<dist>.<arch>.rpm"
        split_dash = rpm_name.split('-')
        split_dot = split_dash[-1].split('.')
        args = {
            'base': self.base,
            'ver1': split_dash[3],
            'ver2': '.'.join([split_dot[0], split_dot[1]]),
            'arch': split_dot[2],
            'name': rpm_name,
        }
        url = BASE_URL.format(**args)
        self.host.execute('curl -o {} {}'.format(rpm_name, url))
def clean_nfs_storage(self, nfs_ip, nfs_pass, nfs_path):
    """Wipe everything under ``nfs_path`` on the NFS server.

    Connects to ``nfs_ip`` and removes the directory contents; any
    failure is printed and fails the running test case.
    """
    try:
        host_ins = Machine(
            host_string=nfs_ip, host_user='******', host_passwd=nfs_pass)
        host_ins.execute("rm -rf %s/*" % nfs_path)
    except Exception:
        # narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt still propagate
        import traceback
        traceback.print_exc()
        self.fail()
def check_additional_host_socre(self, ip, passwd):
    """Poll ``hosted-engine --vm-status --json`` on the additional host
    until host "2" reports the full HA score (3400).

    Polls every 10 seconds and raises RuntimeError after ~10 attempts.
    NOTE(review): the method name keeps the original "socre" typo so
    existing callers are not broken.
    """
    import json

    cmd = "hosted-engine --vm-status --json"
    host_ins = Machine(
        host_string=ip, host_user='******', host_passwd=passwd)
    attempts = 0
    while True:
        if attempts > 10:
            raise RuntimeError(
                "Timeout waiting for host to be available for running HE.")
        ret = host_ins.execute(cmd)
        # the command output is JSON, so parse it with json.loads()
        # instead of eval() — eval would execute arbitrary expressions
        # coming from the remote host (the old `true, false = True,
        # False` hack only existed to make eval accept JSON literals)
        # NOTE(review): assumes ret[1] is the command's stdout
        status = json.loads(ret[1])
        if status["2"]["score"] == 3400:
            break
        time.sleep(10)
        attempts += 1
def setUp(self):
    """Create the Machine handle and a local or remote selenium
    webdriver, then open the cockpit page under test and log in."""
    # connection parameters come from the environment
    host_string = os.environ.get('HOST_STRING')
    username = os.environ.get('USERNAME')
    passwd = os.environ.get('PASSWD')
    browser = os.environ.get('BROWSER')
    selenium_hub = os.environ.get('HUB')

    self.host = Machine(host_string, username, passwd)

    if selenium_hub:
        # drive a browser through the selenium grid hub
        hub_url = 'http://%s:4444/wd/hub' % selenium_hub
        capabilities = self._get_desired_capabilities(browser)
        self.driver = webdriver.Remote(
            command_executor=hub_url,
            desired_capabilities=capabilities)
    elif browser == 'firefox':
        self.driver = webdriver.Firefox()
    else:
        self.driver = webdriver.Chrome()

    # common webdriver setup
    self.driver.set_window_size(1200, 1200)
    self.driver.set_page_load_timeout(90)
    self.screenshot_path = self.logdir

    # open target page
    self.open_cockpit(host_string, browser)
    self.login(username, passwd)
    self.open_page()
def prepare_env(self, storage_type='nfs'):
    """Reset the hosted-engine environment before a deployment test.

    Cleans any previous HE deployment on the additional host and on
    ``self.host``, then wipes the backing storage selected by
    ``storage_type`` ('nfs', 'iscsi', 'fc' or 'gluster').
    """
    additional_host = Machine(host_string=self.config_dict['second_host'],
                              host_user='******',
                              host_passwd=self.config_dict['second_pass'])
    # NOTE(review): empty stdout from `--check-deployed` is taken to mean
    # a previous deployment must be cleaned up — confirm the command's
    # output contract
    if additional_host.execute('hosted-engine --check-deployed',raise_exception=False).stdout == "":
        additional_host.execute("yes|sh /usr/sbin/ovirt-hosted-engine-cleanup", timeout=250)
    # install the RHV-M appliance rpm if it is missing on the main host
    if self.host.execute('rpm -qa|grep appliance',raise_exception=False).stdout == "":
        self.install_rhvm_appliance(self.config_dict['rhvm_appliance_path'])
    if self.host.execute('hosted-engine --check-deployed', raise_exception=False).stdout == "":
        # clean the main host's HE state, then reload the page and
        # re-enter the hosted-engine frame
        self.backup_remove_logs()
        self.clean_hostengine_env()
        self.refresh()
        self.switch_to_frame(self.OVIRT_HOSTEDENGINE_FRAME_NAME)
    if storage_type == 'nfs':
        self.clean_nfs_storage(self.config_dict['nfs_ip'],
                               self.config_dict['nfs_pass'],
                               self.config_dict['nfs_dir'])
    elif storage_type == 'iscsi':
        try:
            # rewrite the host's iscsi initiator name to the configured
            # one, push it back, restart iscsi and wipe the target
            self.host.get_file('/etc/iscsi/initiatorname.iscsi','./initiatorname.iscsi')
            new_line = ''
            with open('./initiatorname.iscsi') as config_file:
                for line in config_file:
                    if line.startswith('InitiatorName'):
                        new_line = line.replace(line.split('=')[-1],self.config_dict['iscsi_initiator_name'])
            with open('./initiatorname.iscsi', 'w') as config_file:
                config_file.write(new_line)
            self.host.put_file('./initiatorname.iscsi','/etc/iscsi/initiatorname.iscsi')
            os.remove('./initiatorname.iscsi')
            self.host.execute('systemctl restart iscsid iscsi')
            self.clean_iscsi_storage(self.config_dict['iscsi_portal_ip'])
        except Exception as e:
            # NOTE(review): best-effort — every iscsi cleanup error is
            # silently swallowed here; consider at least logging `e`
            pass
    elif storage_type == 'fc':
        # fc cleanup is currently disabled; the loop is kept as a
        # placeholder for clean_fc_storage()
        luns_fc_storage = self.config_dict['luns_fc_storage']
        for lun_id in luns_fc_storage:
            # self.clean_fc_storage(lun_id)
            pass
    elif storage_type == 'gluster':
        # wipe bricks on every gluster server, then re-create the
        # volume once from the first server
        glusterfs_servers = list(self.config_dict['gluster_ips'].values())
        for ip in glusterfs_servers:
            self.clean_glusterfs_storage_pre(ip, self.config_dict['root_passwd'])
        self.clean_glusterfs_storage_post(glusterfs_servers[0],self.config_dict['root_passwd'])
def setUp(self):
    """Connect to the test host and install the cockpit-machines yum
    repo file if it is not already present there."""
    # polarion case bookkeeping
    self.case_id = None
    self.case_state = None
    self.host = Machine(os.environ.get('HOST_STRING'),
                        os.environ.get('USERNAME'),
                        os.environ.get('PASSWD'))
    self.new_ver = self.params.get('new_machines_rpm_ver')
    # pick the repo file matching the target rpm's dist tag
    repo_file_name = "mc_7.repo" if "el7" in self.new_ver else "mc_n.repo"
    check = self.host.execute(
        'test -e /etc/yum.repos.d/{}'.format(repo_file_name),
        raise_exception=False)
    if not check.succeeded:
        repo_file_path = self.get_data(repo_file_name)
        self.host.put_file(repo_file_path, "/etc/yum.repos.d/")
def __init__(self, disk_path, guest_ip, host_ip, cpu_to_pin="2"):
    """Build a QEMU VM description (nothing is launched here).

    :param disk_path: path of the guest disk image
    :param guest_ip: IP address assigned to the guest
    :param host_ip: IP of the host that runs QEMU
    :param cpu_to_pin: host CPU to pin the VM to (string, default "2")
    """
    super(Qemu, self).__init__(disk_path, guest_ip, host_ip)
    self._pid = None
    self.cpu_to_pin = cpu_to_pin
    # self.cpu_num = cpu_num
    self.mac_address = "52:54:00:a0:e5:1c"  # fixed guest NIC MAC
    self.vnc_number = "10"
    self.ethernet_dev = self.QEMU_E1000  # can be "virtio-net-pci" or "e1000"
    self.vhost = False
    self.sidecore = False
    self.mem = "8192"
    self.io_thread_cpu = ""

    # auto config
    self.tap_device = ''
    self.pidfile = None
    self.qemu_config = dict()
    self.bridge = None
    self.exe = self.QEMU_EXE
    self.is_io_thread_nice = False
    self.io_nice = 1  # nice value to set
    # NOTE(review): assumes the base __init__ stored the remote address
    # in self._remote_ip — confirm in the parent class
    self.root = Machine(self._remote_ip, "root")
    # self.kernel = r"/home/bdaviv/repos/e1000-improv/linux-3.13.0/arch/x86/boot/bzImage"
    # self.kernel = r"/homes/bdaviv/repos/msc-ng/linux-4.13.9/arch/x86/boot/bzImage"
    self.kernel = r"../vms/vmlinuz"  # r"../linux/arch/x86/boot/bzImage"
    # self.initrd = r"../vms/initrd.img"
    # self.initrd = r"/homes/bdaviv/repos/msc-ng/vm-files/kernels/initrd.img-4.13.9-ng+"
    self.initrd = r"../vms/initrd.img"  # r"../vms/initrd.img"
    self.kernel_cmdline = r"BOOT_IMAGE=/vmlinuz-5.4.0-73-generic root=/dev/mapper/ubuntu--vg-ubuntu--lv ro maybe-ubiquity"
    self.kernel_cmdline_additional = ""
    self.nic_additionals = ""
    self.qemu_additionals = ""
    self.disable_kvm_poll = False
    self.guest_e1000_ng_flag = 0
    self.qmp = None
class TestMachinesOvirtPackage(Test):
    """
    :avocado: enable
    :avocado: tags=ovirt_pkg
    """

    def setUp(self):
        """Load rpm names and base URL from ovirt_package.yml, then
        download both cockpit-machines-ovirt rpms onto the test host."""
        config_path = self.get_data('ovirt_package.yml')
        # yaml.load() without an explicit Loader is unsafe and the
        # open() handle was leaked; safe_load + context manager fixes both
        with open(config_path) as config_file:
            self.config_dict = yaml.safe_load(config_file)
        self.OLD_MACHINES_RPM_NAME = self.config_dict['old_pkg']
        self.NEW_MACHINES_RPM_NAME = self.config_dict['new_pkg']
        self.base = self.config_dict['base_url']
        host_string = os.environ.get('HOST_STRING')
        username = os.environ.get('USERNAME')
        passwd = os.environ.get('PASSWD')
        self.host = Machine(host_string, username, passwd)
        for rpm_name in [
                self.OLD_MACHINES_RPM_NAME, self.NEW_MACHINES_RPM_NAME]:
            # derive URL fields from the rpm file name; the chunk after
            # the last '-' is "<ver2>.<dist>.<arch>.rpm"
            split_dash = rpm_name.split('-')
            split_dot = split_dash[-1].split('.')
            args = {
                'base': self.base,
                'ver1': split_dash[3],
                'ver2': '.'.join([split_dot[0], split_dot[1]]),
                'arch': split_dot[2],
                'name': rpm_name,
            }
            url = BASE_URL.format(**args)
            self.host.execute('curl -o {} {}'.format(rpm_name, url))

    def test_upgrade_pkg(self):
        """Install the old rpm, upgrade to the new one and verify the
        installed version matches the new rpm name."""
        self.host.execute('rpm -e cockpit-machines-ovirt --nodeps')
        self.host.execute("rpm -ivh {}".format(self.OLD_MACHINES_RPM_NAME))
        self.host.execute("rpm -Uvh {}".format(self.NEW_MACHINES_RPM_NAME))
        ret = self.host.execute(
            'rpm -qa | grep cockpit-machines-ovirt --color=never',
            raise_exception=False)
        # NOTE(review): assumes execute() returns the stripped stdout
        self.assertEqual(ret + '.rpm', self.NEW_MACHINES_RPM_NAME)

    def test_remove_pkg(self):
        """Remove the package and verify rpm no longer lists it."""
        self.host.execute('rpm -e cockpit-machines-ovirt --nodeps')
        ret = self.host.execute(
            'rpm -qa | grep cockpit-machines-ovirt', raise_exception=False)
        self.assertEqual(ret, '')

    def test_install_pkg(self):
        """Install the new rpm, verify its version and start cockpit."""
        self.host.execute('rpm -ivh {}'.format(self.NEW_MACHINES_RPM_NAME))
        ret = self.host.execute(
            'rpm -qa | grep cockpit-machines-ovirt --color=never',
            raise_exception=False)
        self.assertEqual(ret + '.rpm', self.NEW_MACHINES_RPM_NAME)
        self.host.execute(
            'systemctl enable cockpit.socket && systemctl start cockpit.socket')
def clean_nfs_storage(self, nfs_ip, nfs_pass, nfs_path):
    """Remove every entry under ``nfs_path`` on the given NFS server."""
    nfs_server = Machine(host_string=nfs_ip,
                         host_user='******',
                         host_passwd=nfs_pass)
    nfs_server.execute("rm -rf %s/*" % nfs_path)
def clean_glusterfs_storage_post(self, glusterfs_ip, password):
    """Re-create the replica-3 gluster volume gv1 across the configured
    gluster servers, apply the virt-store settings and start the
    configured volume.

    Any failure is printed and fails the running test case.
    """
    host_glusterfs_server = Machine(
        host_string=glusterfs_ip, host_user='******', host_passwd=password)
    try:
        host_glusterfs_server.execute(
            "gluster v create gv1 replica 3 {0}:/data/gluster/gv1 {1}:/data/gluster/gv1 {2}:/data/gluster/gv1".format(
                *self.config_dict['gluster_ips'].keys()))
        # recommended settings for a virt store volume (uid/gid 36 is
        # vdsm/kvm so oVirt can own the images)
        for setting in (
                "gluster volume set gv1 cluster.quorum-type auto",
                "gluster volume set gv1 network.ping-timeout 10",
                "gluster volume set gv1 auth.allow \*",
                "gluster volume set gv1 group virt",
                "gluster volume set gv1 storage.owner-uid 36",
                "gluster volume set gv1 storage.owner-gid 36",
                "gluster volume set gv1 server.allow-insecure on"):
            host_glusterfs_server.execute(setting)
        host_glusterfs_server.execute(
            "gluster volume start {}".format(self.config_dict['gluster_volume']))
    except Exception:
        # narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt still propagate
        import traceback
        traceback.print_exc()
        self.fail()
def clean_glusterfs_storage_pre(self, glusterfs_ip, password):
    """Reset one gluster server's brick before re-creating the volume.

    On the first configured server the existing volume is stopped and
    deleted; on every server the brick filesystem is re-created on
    /dev/sdb1 and the brick directory re-made.  Any failure is printed
    and fails the running test case.
    """
    host_glusterfs_server = Machine(
        host_string=glusterfs_ip, host_user='******', host_passwd=password)
    try:
        if glusterfs_ip == list(self.config_dict['gluster_ips'].values())[0]:
            # volume-level teardown only needs to run once
            host_glusterfs_server.execute(
                "yes|gluster volume stop {}".format(self.config_dict['gluster_volume']),
                raise_exception=False)
            host_glusterfs_server.execute(
                "yes|gluster v delete {}".format(self.config_dict['gluster_volume']),
                raise_exception=False)
        host_glusterfs_server.execute(
            "umount {}".format(self.config_dict['gluster_volume_mount']))
        # wipe and re-create the brick filesystem
        host_glusterfs_server.execute("mkfs.ext4 /dev/sdb1")
        host_glusterfs_server.execute(
            "mount /dev/sdb1 {}".format(self.config_dict['gluster_volume_mount']))
        host_glusterfs_server.execute(
            "mkdir {0}/{1}".format(self.config_dict['gluster_volume_mount'],
                                   self.config_dict['gluster_volume']))
    except Exception:
        # narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt still propagate
        import traceback
        traceback.print_exc()
        self.fail()
class TestMachinesLibvirtPackage(Test):
    """
    :avocado: enable
    :avocado: tags=machines_pkg
    """

    def setUp(self):
        """Connect to the test host and make sure both cockpit-machines
        rpms (old and new) exist on it, downloading any missing one."""
        # initialize polarion case
        self.case_id = None
        self.case_state = None
        host_string = os.environ.get('HOST_STRING')
        username = os.environ.get('USERNAME')
        passwd = os.environ.get('PASSWD')
        self.host = Machine(host_string, username, passwd)
        self.old_ver = self.params.get('old_machines_rpm_ver')
        self.new_ver = self.params.get('new_machines_rpm_ver')
        self.base_url = self.params.get('base_url')
        for rpm_name in [self.old_ver, self.new_ver]:
            cmd = 'test -e {}'.format(rpm_name)
            ret = self.host.execute(cmd, raise_exception=False)
            if ret.succeeded:
                # rpm already on the host, nothing to download
                continue
            # derive URL fields from the rpm file name; the chunk after
            # the last '-' is "<ver2>.<dist>.<arch>.rpm"
            split_dash = rpm_name.split('-')
            split_dot = split_dash[-1].split('.')
            args = {}
            args['ver1'] = split_dash[2]
            args['ver2'] = '.'.join([split_dot[0], split_dot[1]])
            args['arch'] = split_dot[2]
            args['name'] = rpm_name
            url = self.base_url + URL_VER.format(**args)
            cmd = 'wget {}'.format(url)
            self.host.execute(cmd)

    @check_case_id
    def tearDown(self):
        # the decorator reports the polarion case result
        pass

    @add_case_id("RHEL-114013")
    def test_upgrade_pkg(self):
        """Install the old rpm, upgrade to the new one and verify the
        installed version matches the new rpm name."""
        cmd = "rpm -i {}".format(self.old_ver)
        self.host.execute(cmd)
        cmd = "rpm -U {}".format(self.new_ver)
        self.host.execute(cmd)
        cmd = 'rpm -qa | grep cockpit-machines --color=never'
        ret = self.host.execute(cmd, raise_exception=False)
        # NOTE(review): assumes execute() returns the stripped stdout
        self.assertEqual(ret + '.rpm', self.new_ver)

    @add_case_id("RHEL-115592")
    def test_remove_pkg(self):
        """
        :avocado: tags=test
        """
        cmd = 'rpm -e cockpit-machines'
        self.host.execute(cmd)
        cmd = 'rpm -qa | grep cockpit-machines'
        ret = self.host.execute(cmd, raise_exception=False)
        # package must be gone, so grep output is empty
        self.assertEqual(ret, '')

    @add_case_id("RHEL-113808")
    def test_install_pkg(self):
        """Install the new rpm and verify its version."""
        cmd = 'rpm -i {}'.format(self.new_ver)
        self.host.execute(cmd)
        cmd = 'rpm -qa | grep cockpit-machines --color=never'
        ret = self.host.execute(cmd, raise_exception=False)
        self.assertEqual(ret + '.rpm', self.new_ver)

    def test_start_cockpit(self):
        """Enable and start the cockpit socket unit."""
        cmd = 'systemctl enable cockpit.socket && systemctl start cockpit.socket'
        self.host.execute(cmd)
class TestMachinesLibvirtPackage(Test):
    """
    :avocado: enable
    :avocado: tags=machines_pkg
    """

    def setUp(self):
        """Connect to the test host and install the cockpit-machines
        yum repo file if it is not already present there."""
        # initialize polarion case
        self.case_id = None
        self.case_state = None
        host_string = os.environ.get('HOST_STRING')
        username = os.environ.get('USERNAME')
        passwd = os.environ.get('PASSWD')
        self.host = Machine(host_string, username, passwd)
        self.new_ver = self.params.get('new_machines_rpm_ver')
        # pick the repo file matching the target rpm's dist tag
        if "el7" in self.new_ver:
            repo_file_name = "mc_7.repo"
        else:
            repo_file_name = "mc_n.repo"
        cmd = 'test -e /etc/yum.repos.d/{}'.format(repo_file_name)
        ret = self.host.execute(cmd, raise_exception=False)
        if not ret.succeeded:
            repo_file_path = self.get_data(repo_file_name)
            self.host.put_file(repo_file_path, "/etc/yum.repos.d/")

    @check_case_id
    def tearDown(self):
        # the decorator reports the polarion case result
        pass

    @add_case_id("RHEL-113808")
    def test_install_pkg(self):
        """Install cockpit-machines from the repo and verify the
        installed version matches the expected rpm name."""
        self.host.execute('yum install -y cockpit-machines')
        ret = self.host.execute(
            'rpm -qa | grep cockpit-machines --color=never',
            raise_exception=False)
        # NOTE(review): assumes execute() returns the stripped stdout
        self.assertEqual(ret + '.rpm', self.new_ver)

    @add_case_id("RHEL-115592")
    def test_remove_pkg(self):
        """
        :avocado: tags=test
        """
        self.host.execute('yum remove -y cockpit-machines')
        ret = self.host.execute(
            'rpm -qa | grep cockpit-machines', raise_exception=False)
        self.assertEqual(ret, '')

    @add_case_id("RHEL-114013")
    def test_update_pkg(self):
        """Download and install the old rpm, `yum update` to the latest
        version and verify it matches the expected new rpm name."""
        old_ver = self.params.get('old_machines_rpm_ver')
        if 'el7' in old_ver:
            base_url = self.params.get('base_url_7')
        else:
            base_url = self.params.get('base_url_n')
        # download the old version if it is not already on the host
        cmd = 'test -e {}'.format(old_ver)
        ret = self.host.execute(cmd, raise_exception=False)
        if not ret.succeeded:
            # derive URL fields from the rpm file name; the chunk after
            # the last '-' is "<ver2>.<dist>.<arch>.rpm"
            split_dash = old_ver.split('-')
            split_dot = split_dash[-1].split('.')
            args = {}
            args['ver1'] = split_dash[2]
            args['ver2'] = '.'.join([split_dot[0], split_dot[1]])
            args['arch'] = split_dot[2]
            args['name'] = old_ver
            url = base_url + URL_VER.format(**args)
            self.host.execute('curl -O {}'.format(url))
        # install old ver (the original source had a line break inside
        # this string literal, producing a broken "rpm -i \n<rpm>"
        # command — reconstructed to a single-line command)
        self.host.execute("rpm -i {}".format(old_ver))
        # yum update to latest ver
        self.host.execute("yum update -y cockpit-machines")
        ret = self.host.execute(
            'rpm -qa | grep cockpit-machines --color=never',
            raise_exception=False)
        self.assertEqual(ret + '.rpm', self.new_ver)

    def test_start_cockpit(self):
        """Enable and restart the cockpit socket unit."""
        self.host.execute(
            'systemctl enable cockpit.socket && systemctl restart cockpit.socket')