def test_cloudinit_login_with_publickey(self):
    """
    :avocado: tags=tier1,cloudinit,cloud_utils_growpart,dependencies
    RHEL7-87453: WALA-TC: [Cloudinit] VM can successfully login
    after provisioning(with publickey authentication)
    1. Create a VM with only public key authentication
    2. Login with publickey, should have sudo privilege
    """
    self.log.info(
        "RHEL7-87453: WALA-TC: [Cloudinit] VM can successfully login "
        "after provisioning(with publickey authentication)")
    self.vm.create(wait=True)
    self.session.connect(authentication="publickey")
    self.assertEqual(self.vm.vm_username,
                     self.session.cmd_output("whoami"),
                     "Fail to login with publickey")
    self.assertIn(
        "%s ALL=(ALL) NOPASSWD:ALL" % self.vm.vm_username,
        self.session.cmd_output(
            "sudo cat /etc/sudoers.d/90-cloud-init-users"),
        "No sudo privilege")
    # Collect /var/log/cloud-init.log and /var/log/messages.
    # Best-effort: a log-collection failure must not fail the test.
    try:
        self.session.cmd_output("mkdir -p /tmp/logs")
        self.session.cmd_output(
            "sudo cp /var/log/cloud-init.log /tmp/logs/")
        self.session.cmd_output("sudo cp /var/log/messages /tmp/logs/")
        self.session.cmd_output("sudo chmod 644 /tmp/logs/*")
        host_logpath = os.path.dirname(self.job.logfile) + "/logs"
        command("mkdir -p {}".format(host_logpath))
        self.session.copy_files_from("/tmp/logs/*", host_logpath)
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; log instead of silently passing.
        self.log.warning("Failed to collect logs: %s", exc)
def redeploy(self, wait=True):
    """Redeploy this VM via `az vm redeploy`, then refresh cached properties."""
    parts = [
        'az vm redeploy',
        '--name "{}"'.format(self.vm_name),
        '--resource-group "{}"'.format(self.resource_group),
    ]
    if not wait:
        parts.append("--no-wait")
    command(" ".join(parts))
    self.show()
def osdisk_resize(self, size, wait=True):
    """Resize this VM's OS disk to `size` GB via `az vm update --set`."""
    parts = [
        'az vm update',
        '--name "{}"'.format(self.vm_name),
        '--resource-group {}'.format(self.resource_group),
        '--set storageProfile.osDisk.diskSizeGB={}'.format(size),
    ]
    if not wait:
        parts.append("--no-wait")
    command(" ".join(parts))
def disk_attach(self, name, size, new=True):
    """Attach a managed data disk to this VM; create it first when `new`."""
    args = [
        'az vm disk attach',
        '--name {}'.format(name),
        '--vm-name "{}"'.format(self.vm_name),
        '--resource-group {}'.format(self.resource_group),
        '--size-gb {}'.format(size),
    ]
    if new:
        args.append("--new")
    command(" ".join(args))
def deallocate(self, wait=True):
    """Deallocate this VM, then refresh the cached properties."""
    pieces = [
        'az vm deallocate',
        '--name "{}"'.format(self.vm_name),
        '--resource-group "{}"'.format(self.resource_group),
    ]
    if not wait:
        pieces.append("--no-wait")
    command(" ".join(pieces))
    self.show()
def delete(self, wait=False):
    """Delete this resource (async by default); always returns True."""
    options = ' delete --name {} --resource-group "{}" --nic-name {}'.format(
        self.name, self.resource_group, self.nic_name)
    cmd = self.basecli + options
    if not wait:
        cmd = cmd + " --no-wait"
    command(cmd)
    return True
def user_update(self, username, password=None, ssh_key_value=None):
    """Update (or create) a user account on this VM via `az vm user update`.

    :param username: account name to update
    :param password: when given, set this password on the account
    :param ssh_key_value: when given, install this public key for the account
    """
    # The original built this string with an in-literal line continuation,
    # which embedded a long run of spaces in the shell command; rebuilt
    # with single spaces between options.
    cmd = ('az vm user update --name "{}" --resource-group {} '
           '--username {}'.format(self.vm_name, self.resource_group,
                                  username))
    if ssh_key_value:
        cmd += ' --ssh-key-value "{}"'.format(ssh_key_value)
    if password:
        cmd += ' --password "{}"'.format(password)
    command(cmd)
def unmanaged_disk_attach(self, name, size, new=True, disk_uri=""):
    """Attach an unmanaged disk: create a new one, or attach an existing VHD."""
    pieces = [
        'az vm unmanaged-disk attach',
        '--name {}'.format(name),
        '--vm-name "{}"'.format(self.vm_name),
        '--resource-group {}'.format(self.resource_group),
        '--size-gb {}'.format(size),
        '--new' if new else '--vhd-uri {}'.format(disk_uri),
    ]
    command(" ".join(pieces))
def extension_set(self, name, publisher, settings='', protected_settings=''):
    """Install/update a VM extension via `az vm extension set`.

    :param name: extension name
    :param publisher: extension publisher
    :param settings: public settings JSON string (optional)
    :param protected_settings: protected settings JSON string (optional)
    """
    # Rebuilt the command string: the original's in-literal line
    # continuation embedded a run of literal spaces in the shell command.
    cmd = ('az vm extension set --name "{}" --vm-name "{}" '
           '--resource-group {} --publisher "{}"'.format(
               name, self.vm_name, self.resource_group, publisher))
    if protected_settings:
        cmd += " --protected-settings '{}'".format(protected_settings)
    if settings:
        cmd += " --settings '{}'".format(settings)
    command(cmd)
def delete(self, wait=True):
    """Delete this VM; when `wait`, poll until Azure reports it gone."""
    cmd = 'az vm delete --name "{}" --resource-group "{}" --yes'.format(
        self.vm_name, self.resource_group)
    if not wait:
        cmd += " --no-wait"
    command(cmd)
    if not wait:
        return
    # Sometimes VM still exists for a while after cli finished
    timeout_msg = "Timed out waiting for server to get deleted."
    for _ in utils_misc.iterate_timeout(100, timeout_msg, wait=10):
        if not self.exists():
            break
def create(self, wait=True):
    """Create the VM with `az vm create`, built from instance attributes.

    :param wait: when False, pass --no-wait and return without polling.
    :return: result of self.show() when the CLI produced output, else None.
    """
    # Mandatory arguments present on every deployment.
    cmd = 'az vm create --name "{}" --resource-group "{}" --image "{}" '\
        '--size "{}" --admin-username "{}" --authentication-type "{}" '\
        ' --os-disk-name "{}"'\
        .format(self.vm_name, self.resource_group, self.image, self.size,
                self.vm_username, self.authentication_type,
                self.os_disk_name)
    # Public-key material: an explicit key value wins over asking the
    # CLI to generate a key pair.
    if self.ssh_key_value:
        cmd += ' --ssh-key-value {}'.format(self.ssh_key_value)
    elif self.generate_ssh_keys:
        cmd += " --generate-ssh-keys"
    # A password is only meaningful for non-ssh authentication types.
    if self.vm_password and self.authentication_type != "ssh":
        cmd += ' --admin-password "{}"'.format(self.vm_password)
    if self.custom_data:
        cmd += ' --custom-data "{}"'.format(self.custom_data)
    # Unmanaged (blob-backed) OS disk requires a storage account.
    if self.use_unmanaged_disk:
        cmd += ' --use-unmanaged-disk --storage-account {}'.format(
            self.storage_account)
    if self.assign_identity:
        cmd += " --assign-identity"
        cmd += ' --scope "{}"'.format(self.scope)
    # Raw VHD images need the OS type stated explicitly.
    if ".vhd" in self.image:
        cmd += ' --os-type "{}"'.format(self.os_type)
    # Either reuse pre-built NICs, or let the CLI create networking
    # from the configured vnet/subnet.
    if self.nics:
        cmd += ' --nics {}'.format(self.nics)
    else:
        cmd += ' --vnet-name "{}" --subnet "{}"'.format(
            self.vnet_name, self.subnet)
    if self.os_disk_size:
        cmd += ' --os-disk-size-gb {}'.format(self.os_disk_size)
    if not wait:
        cmd += " --no-wait"
    ret = command(cmd)
    # Refresh cached properties only when the CLI returned output
    # (with --no-wait there may be nothing to parse yet).
    if len(ret.stdout):
        return self.show()
def create(self):
    """Create the resource group; cache its id/properties on success."""
    ret = command('az group create --location "{}" --resource-group "{}"'
                  .format(self.location, self.name))
    if not ret.stdout:
        return
    parsed = json.loads(ret.stdout)
    self.id = parsed["id"]
    self.properties = parsed["properties"]
    return True
def create(self):
    """Create the image definition and cache its id/properties."""
    cmd = ('{} create --name "{}" --resource-group "{}" '
           '--source {} --hyper-v-generation {} --os-type linux').format(
               self.basecli, self.name, self.resource_group,
               self.source, self.generation)
    ret = command(cmd)
    if not ret.stdout:
        return
    parsed = json.loads(ret.stdout)
    self.id = parsed["id"]
    self.properties = parsed
    return True
def create(self):
    """Create the public IP and cache its id/properties on success."""
    cmd = '{} create --name "{}" --resource-group "{}"'.format(
        self.basecli, self.name, self.resource_group)
    ret = command(cmd)
    if not ret.stdout:
        return
    ip_info = json.loads(ret.stdout).get("publicIp")
    self.id = ip_info["id"]
    self.properties = ip_info
    return True
def show(self):
    """Fetch resource-group info and cache id/properties.

    :return: True when the group was found and parsed; False when the
        CLI call failed (treated as "group does not exist").
    """
    cmd = 'az group show --resource-group "{}"'.format(self.name)
    try:
        ret = command(cmd)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; a failed lookup still maps to False.
        return False
    if len(ret.stdout):
        info = json.loads(ret.stdout)
        self.id = info["id"]
        self.properties = info["properties"]
        return True
def show(self):
    """Query this resource with `az ... show` and cache id/properties.

    :return: True on success; False when the CLI call failed.
    """
    cmd = self.basecli + ' show --name {} --resource-group "{}"'.format(
        self.name, self.resource_group)
    try:
        ret = command(cmd)
    except Exception:
        # Narrowed from a bare `except:` so system-exiting exceptions
        # are not swallowed; lookup failure maps to False.
        return False
    if len(ret.stdout):
        info = json.loads(ret.stdout)
        self.id = info["id"]
        self.properties = info
        return True
def run_command(self, command_id="RunShellScript", scripts=None,
                parameters=None):
    """Run a command inside the VM via `az vm run-command invoke`.

    :param command_id: Azure run-command id (default RunShellScript)
    :param scripts: script text to execute (optional)
    :param parameters: parameters passed to the script (optional)
    :return: the `message` field of the first value in the CLI response.
    """
    # Rebuilt the command string: the original's in-literal line
    # continuation embedded a run of literal spaces in the shell command.
    cmd = ('az vm run-command invoke --name "{}" --resource-group {} '
           '--command-id {}'.format(self.vm_name, self.resource_group,
                                    command_id))
    if scripts:
        cmd += " --scripts '{}'".format(scripts)
    if parameters:
        cmd += " --parameters '{}'".format(parameters)
    ret = command(cmd)
    return json.loads(ret.stdout).get("value")[0].get("message")
def show(self):
    """Refresh this VM's cached properties via `az vm show -d`.

    :return: True when properties were fetched and parsed; False when the
        CLI call failed or produced no output.
    """
    cmd = 'az vm show -d --name "{}" --resource-group "{}"'.format(
        self.vm_name, self.resource_group)
    try:
        ret = command(cmd)
    except Exception:
        # Narrowed from a bare `except:`; a failed lookup means the VM
        # is treated as not found.
        return False
    if len(ret.stdout):
        self.properties = json.loads(ret.stdout)
        return True
    return False
def create(self):
    """Create this IP configuration on its NIC; cache id/properties."""
    cmd = ('{} create --name "{}" --resource-group "{}" '
           '--nic-name {} --vnet-name {} --subnet {}').format(
               self.basecli, self.name, self.resource_group,
               self.nic_name, self.vnet, self.subnet)
    if self.ip_version:
        cmd += ' --private-ip-address-version {}'.format(self.ip_version)
    ret = command(cmd)
    if not ret.stdout:
        return
    data = json.loads(ret.stdout)
    self.id = data["id"]
    self.properties = data
    return True
def test_create_vm_sshkey(self):
    """
    :avocado: tags=tier1
    RHEL7-41652 WALA-TC: [life cycle] Create a VM with sshkey
    """
    self.assertTrue(self.session.connect(authentication="publickey"),
                    "Fail to login through sshkey")
    output = self.session.cmd_output("sudo cat /etc/sudoers.d/waagent")
    expect = "{0} ALL=(ALL) NOPASSWD: ALL".format(self.vm.vm_username)
    self.assertEqual(
        output, expect, "Wrong sudoer permission.\nExpect: {0}\n"
        "Real: {1}".format(expect, output))
    # Collect /var/log/waagent.log and /var/log/messages (the old comment
    # said cloud-init.log, but waagent.log is what is copied here).
    # Best-effort: a collection failure must not fail the test.
    try:
        self.session.cmd_output("mkdir -p /tmp/logs")
        self.session.cmd_output("sudo cp /var/log/waagent.log /tmp/logs/")
        self.session.cmd_output("sudo cp /var/log/messages /tmp/logs/")
        self.session.cmd_output("sudo chmod 644 /tmp/logs/*")
        host_logpath = os.path.dirname(self.job.logfile) + "/logs"
        utils_azure.command("mkdir -p {}".format(host_logpath))
        self.session.copy_files_from("/tmp/logs/*", host_logpath)
    except Exception as exc:
        # Narrowed from a bare `except: pass`: log the failure instead of
        # hiding it completely.
        self.log.warning("Failed to collect logs: %s", exc)
def create(self):
    """Create the NIC with optional public IP, SR-IOV and IP-version flags."""
    cmd = ('{} create --name "{}" --resource-group "{}" '
           '--vnet-name {} --subnet {}').format(
               self.basecli, self.name, self.resource_group,
               self.vnet, self.subnet)
    optional_flags = (
        (' --public-ip-address {}', self.publicip),
        (' --accelerated-networking {}', self.sriov),
        (' --private-ip-address-version {}', self.ip_version),
    )
    for template, value in optional_flags:
        if value:
            cmd += template.format(value)
    ret = command(cmd)
    if not ret.stdout:
        return
    nic = json.loads(ret.stdout).get("NewNIC")
    self.id = nic["id"]
    self.properties = nic
    return True
def setUp(self):
    """Per-test setup: Azure login, VM provisioning, case-specific prep."""
    account = AzureAccount(self.params)
    account.login()
    self.project = self.params.get("rhel_ver", "*/VM/*")
    # Short case name, e.g. "test_provision_gen2_vm", extracted from the
    # avocado test id.
    self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
    if self.case_short_name == "test_provision_gen2_vm":
        # Gen2 on-demand images only exist from RHEL 7.8 onwards.
        if LooseVersion(self.project) < LooseVersion('7.8'):
            self.cancel(
                "Skip case because RHEL-{} ondemand image doesn't support gen2".format(self.project))
        cloud = Setup(self.params, self.name, size="DC2s")
    else:
        cloud = Setup(self.params, self.name)
    self.vm = cloud.vm
    pre_delete = False
    if self.case_short_name == "test_provision_with_2_keys":
        # This case provisions a dedicated VM carrying two public keys.
        pre_delete = True
        self.vm.vm_name += "-2keys"
        key1 = "{}/.ssh/id_rsa.pub".format(os.path.expanduser('~'))
        key2 = "/tmp/newkey.pub"
        if not os.path.exists(key2):
            command("ssh-keygen -f {} -q -N ''".format(key2.split('.')[0]))
        self.assertTrue(os.path.exists(key1),
                        "Key {} doesn't exist".format(key1))
        self.assertTrue(os.path.exists(key2),
                        "Key {} doesn't exist".format(key2))
        self.vm.ssh_key_value = "{} {}".format(key1, key2)
        # Keep the key bodies (type + base64, comment field stripped)
        # for later verification inside the VM.
        with open(key1) as f:
            self.key1_value = f.read().rsplit(' ', 1)[0]
        with open(key2) as f:
            self.key2_value = f.read().rsplit(' ', 1)[0]
    self.session = cloud.init_vm(pre_delete=pre_delete)
    self.username = self.vm.vm_username
    self.package = self.params.get("packages", "*/Other/*")
    if self.case_short_name == "test_install_uninstall_package":
        if self.session.cmd_status_output("ls /tmp/{}".format(self.package))[0] != 0:
            self.cancel("Package doesn't exist. \nSkip case.")
    if self.case_short_name.startswith("test_host_plugin"):
        # Back up waagent.conf so teardown can restore it.
        self.session.cmd_output(
            "sudo /usr/bin/cp /etc/waagent.conf{,-bak}")
    if self.case_short_name == "test_upgrade_downgrade_package":
        # Known-old WALA builds used as the downgrade target.
        rhel7_old_pkg_url = "http://download.eng.bos.redhat.com/brewroot/vol/rhel-7/packages/WALinuxAgent/2.2.32/1.el7/noarch/WALinuxAgent-2.2.32-1.el7.noarch.rpm"
        rhel8_old_pkg_url = "http://download.eng.bos.redhat.com/brewroot/vol/rhel-8/packages/WALinuxAgent/2.2.32/1.el8/noarch/WALinuxAgent-2.2.32-1.el8.noarch.rpm"
        try:
            self.assertEqual(
                0,
                self.session.cmd_status_output("ls /tmp/{}".format(self.package))[0],
                "No new pakcage in guest VM")
            import requests
            if str(self.project).startswith('7'):
                old_pkg_url = rhel7_old_pkg_url
            elif str(self.project).startswith('8'):
                old_pkg_url = rhel8_old_pkg_url
            self.old_pkg = old_pkg_url.split('/')[-1]
            # Download the old build once and cache it on the host.
            if not os.path.exists("/tmp/{}".format(self.old_pkg)):
                r = requests.get(old_pkg_url, allow_redirects=True)
                open("/tmp/{}".format(self.old_pkg), 'wb').write(r.content)
            self.session.copy_files_to(
                local_path="/tmp/{}".format(self.old_pkg),
                remote_path="/tmp/{}".format(self.old_pkg))
            self.assertEqual(
                0,
                self.session.cmd_status_output("ls /tmp/{}".format(self.old_pkg))[0],
                "No old pakcage in guest VM")
        except:
            # Deliberately broad: any failure above means the prerequisites
            # are missing, so the case is cancelled rather than failed.
            self.cancel(
                "No old or new package in guest VM. Skip this case.")
def test_package_00_preparation(self):
    """
    Prepare environment for running cases
    """
    # Login with root: copy the provisioned user's authorized keys so the
    # rest of the preparation can run as root.
    self.session.cmd_output(
        "sudo /usr/bin/cp -a /home/{0}/.ssh /root/; "
        "sudo chown -R root:root /root/.ssh".format(self.vm.vm_username))
    self.session.close()
    origin_username = self.vm.vm_username
    # NOTE(review): this username value appears redacted in the source
    # ("******"); confirm the intended account before relying on it.
    self.vm.vm_username = "******"
    self.session.connect(authentication="publickey")
    # Copy and install package into guest
    self.session.cmd_output("rm -rf /tmp/*")
    self.session.copy_files_to(local_path="%s/../../*.rpm" % (self.pwd),
                               remote_path="/tmp")
    # Squid on the host acts as a proxy so the guest can reach internal
    # repos through the reverse SSH tunnel (-R 8080:127.0.0.1:3128) below.
    command("systemctl start squid")
    self.assertEqual(
        command("netstat -tln|grep 3128").exit_status, 0,
        "Fail to enable squid in host")
    self.session.cmd_output("rm -f /etc/yum.repos.d/*")
    self.session.cmd_output("yum clean all")
    import re
    # Derive the RHEL major version from the package NVR ("...el8...");
    # fall back to the project version, then to the latest known major.
    x_match = re.findall("el([0-9]+).*", self.package_list[0])
    if x_match:
        x_version = int(x_match[0])
    else:
        if self.project:
            # BUG FIX: wrap in int(); the original left a str here, and
            # the later `x_version > 7` comparisons raise TypeError on
            # Python 3 for str-vs-int.
            x_version = int(self.project.split('.')[0])
        else:
            # Currently the latest major release is 8. Need to be updated for
            # future major releases
            x_version = 9
    label = "BaseOS" if x_version > 7 else "Server"
    # Validate these repos one by one and select the available one.
    # The trailing `{}` argument renders as the literal "{}", keeping a
    # placeholder in the URL for the repo label filled in later.
    base_url_list = [
        "http://download-node-02.eng.bos.redhat.com/rhel-{}/rel-eng/RHEL-{}/latest-RHEL-{}/compose/{}/x86_64/os/"
        .format(x_version, x_version, self.project, {}),
        "http://download-node-02.eng.bos.redhat.com/rhel-{}/rel-eng/updates/RHEL-{}/latest-RHEL-{}/compose/{}/x86_64/os/"
        .format(x_version, x_version, self.project, {}),
        "http://download-node-02.eng.bos.redhat.com/rhel-{}/nightly/RHEL-{}/latest-RHEL-{}/compose/{}/x86_64/os/"
        .format(x_version, x_version, self.project, {}),
    ]
    for base_url in base_url_list:
        if requests.get(base_url.format(label)).ok:
            break
    # Repo file fragments; each ends with "EOF" because they are fed to
    # a `cat << EOF` heredoc inside the guest.
    BASEREPO = """
[rhel-base]
name=rhel-base
baseurl={}
enabled=1
gpgcheck=0
proxy=http://127.0.0.1:8080/
EOF
""".format(base_url.format(label))
    APPSTREAMREPO = """
[rhel-appstream]
name=rhel-appstream
baseurl={}
enabled=1
gpgcheck=0
proxy=http://127.0.0.1:8080/
EOF
""".format(base_url.format("AppStream"))
    pulpcore_url = "http://download.eng.bos.redhat.com/brewroot/repos/pulpcore-3.4-rhel-{}-build/latest/x86_64/".format(
        x_version)
    PULPCOREREPO = """
[pulpcore-3.4]
name=pulpcore-3.4
baseurl={}
enabled=1
gpgcheck=0
proxy=http://127.0.0.1:8080/
EOF
""".format(pulpcore_url)
    self.session.cmd_output("cat << EOF > /etc/yum.repos.d/rhel.repo%s" %
                            (BASEREPO))
    # WALA doesn't use pulpcore repo to avoid the RHEL-8.0 systemd update issue
    if "WALinuxAgent" not in self.packages and requests.get(
            pulpcore_url).ok:
        self.session.cmd_output(
            "cat << EOF >> /etc/yum.repos.d/rhel.repo%s" % (PULPCOREREPO))
    if x_version > 7:
        self.session.cmd_output(
            "cat << EOF >> /etc/yum.repos.d/rhel.repo%s" % (APPSTREAMREPO))
    # If not kernel, remove old package
    pkgname_list = [pn.rsplit('-', 2)[0] for pn in self.package_list]
    self.log.debug("Package name list: {}".format(pkgname_list))
    if "kernel" not in pkgname_list:
        [
            self.session.cmd_output("rpm -e {}".format(pkgname))
            for pkgname in pkgname_list
        ]
    # Install package. The ssh command opens a reverse tunnel so the
    # guest's yum proxy (127.0.0.1:8080) reaches the host's squid (3128).
    _yum_install = ("ssh -o UserKnownHostsFile=/dev/null -o "
                    "StrictHostKeyChecking=no -R 8080:127.0.0.1:3128 root@%s "
                    "\"yum -y install {}\"" % self.vm.public_ip)
    self.session.cmd_output("yum clean all")
    if self.session.cmd_status_output("rpm -ivh --force /tmp/*.rpm",
                                      timeout=300)[0] != 0:
        # Fall back to yum when a direct rpm install fails (dependencies).
        command(_yum_install.format("/tmp/*.rpm"), timeout=300)
    # Install cloud-init cloud-utils-growpart gdisk for cloud-init related
    # packages
    if x_version > 7:
        cloudinit_pkgs = [
            'cloud-init', 'python3-jsonpatch', 'cloud-utils-growpart',
            'python3-jsonschema', 'python3-httpretty', 'python3-pyserial',
            'python3-prettytable'
        ]
    else:
        cloudinit_pkgs = [
            'cloud-init', 'python-jsonpatch', 'cloud-utils-growpart',
            'python-jsonschema', 'python-httpretty', 'pyserial',
            'python-prettytable', 'python3-jsonpatch', 'python3-jsonschema',
            'python3-httpretty', 'python3-prettytable'
        ]
    for cloudinit_pkg in cloudinit_pkgs:
        if cloudinit_pkg in self.packages:
            for pkg in ["cloud-init", "cloud-utils-growpart", "gdisk"]:
                if self.session.cmd_status_output(
                        "rpm -q %s" % pkg)[0] != 0:
                    command(_yum_install.format(pkg))
            break
    # If WALinuxAgent, install cloud-init and disable
    if self.packages.startswith("WALinuxAgent"):
        command(_yum_install.format("cloud-init"))
    # Install other necessary packages
    _other_pkgs = ("tar net-tools bind-utils dracut-fips dracut-fips-aesni "
                   "tcpdump")
    command(_yum_install.format(_other_pkgs))
    # Delete rhel.repo
    self.session.cmd_output("rm -f /etc/yum.repos.d/rhel.repo")
    # Verify packages are installed
    for pkg in self.package_list:
        self.assertEqual(
            self.session.cmd_status_output("rpm -q {}".format(
                pkg[:-4]))[0], 0,
            "Package {} is not installed.".format(pkg))
    # Install RHUI package in case LISAv2 need to yum install packages.
    self.session.cmd_output(
        "rpm -e rhui-azure-rhel{0}; yum -y --config='https://rhelimage.blob.core.windows.net/repositories/rhui-microsoft-azure-rhel{0}.config' install 'rhui-azure-rhel{0}'"
        .format(x_version))
    # Enable IPv6 init in ifcfg-eth0 for IPv6 case
    self.session.cmd_output(
        "sed -i 's/^IPV6INIT.*$/IPV6INIT=yes/g' /etc/sysconfig/network-scripts/ifcfg-eth0"
    )
    # Deprovision image
    # If cloud-init related packages:
    if (list(set(pkgname_list).intersection(set(cloudinit_pkgs)))):
        if self.with_wala:
            depro_type = "cloudinit_wala"
        else:
            depro_type = "cloudinit"
    elif "WALinuxAgent" in pkgname_list:
        depro_type = "wala"
    elif "kernel" in pkgname_list:
        depro_type = "kernel"
    else:
        self.fail("Not supported package(s): {}".format(pkgname_list))
    script = "deprovision_package.sh"
    self.session.copy_files_to(local_path="{0}/../../scripts/{1}".format(
        self.pwd, script), remote_path="/tmp")
    ret, output = self.session.cmd_status_output(
        "/bin/bash /tmp/{} all {} {}".format(script, depro_type,
                                             origin_username))
    self.assertEqual(ret, 0, "Deprovision VM failed.\n{0}".format(output))
    self.session.cmd_output("rm -f /root/.bash_history")
    self.session.cmd_output("export HISTSIZE=0")
    self.session.close()
    # Get OS disk name and record it so later cases can reuse the image.
    osdisk = self.vm.properties["storageProfile"]["osDisk"]["vhd"][
        "uri"].split('/')[-1]
    self.log.debug("OS disk: {}".format(osdisk))
    self.vm.image = osdisk
    with open("%s/../../osdisk" % self.pwd, 'w') as f:
        f.write(osdisk)
def list(location=None):
    """Return resource groups as parsed JSON, optionally filtered by location."""
    cmd = "az group list"
    if location:
        cmd += " --query \"[?location=='{}']\"".format(location)
    return json.loads(command(cmd).stdout)
def delete(self, wait=False):
    """Delete the resource group (async unless wait=True); returns True."""
    cmd = 'az group delete --resource-group "{}" -y'.format(self.name)
    cmd += "" if wait else " --no-wait"
    command(cmd)
    return True
def extension_delete(self, name):
    """Delete the named VM extension via `az vm extension delete`.

    :param name: extension name to remove
    """
    # Rebuilt the command string: the original's in-literal line
    # continuation embedded a run of literal spaces in the shell command.
    cmd = ('az vm extension delete --name "{}" --vm-name "{}" '
           '--resource-group {}'.format(name, self.vm_name,
                                        self.resource_group))
    command(cmd)
def list():
    """List Azure accounts and return the parsed JSON.

    Previously the CLI output was discarded; now it is returned for
    consistency with the resource-group `list()` helper. Callers that
    ignored the (None) return value are unaffected.
    """
    cmd = "az account list"
    ret = command(cmd)
    return json.loads(ret.stdout)
def unmanaged_disk_detach(self, name):
    """Detach the named unmanaged disk from this VM.

    :param name: disk name to detach
    """
    # Rebuilt the command string: the original's in-literal line
    # continuation embedded a run of literal spaces in the shell command.
    cmd = ('az vm unmanaged-disk detach --name {} --vm-name "{}" '
           '--resource-group {}'.format(name, self.vm_name,
                                        self.resource_group))
    command(cmd)
def disk_detach(self, name):
    """Detach the named managed data disk from this VM."""
    command('az vm disk detach --name {} --vm-name "{}" '
            '--resource-group {}'.format(name, self.vm_name,
                                         self.resource_group))
def user_reset_ssh(self, timeout=1200):
    """Repair this VM's SSH configuration via `az vm user reset-ssh`."""
    reset_cmd = ('az vm user reset-ssh --name "{}" '
                 '--resource-group {}').format(self.vm_name,
                                               self.resource_group)
    # Long-running operation; pass the timeout through positionally,
    # matching the command() helper's calling convention used here.
    command(reset_cmd, timeout)