def main():
    """Fetch the installed-RPM inventory and a compressed log tarball from an appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    # Appliance SSH credentials come from the shared credentials store.
    ssh_client = SSHClient(
        username=credentials['ssh']['username'],
        password=credentials['ssh']['password'],
        hostname=args.address)

    # Write a sorted RPM inventory on the appliance, then pull it down locally.
    status, out = ssh_client.run_command('rpm -qa | sort > /tmp/installed_rpms.txt')
    ssh_client.get_file('/tmp/installed_rpms.txt', 'installed_rpms.txt')

    # Tar up the vmdb log directory and pull that down too.
    status, out = ssh_client.run_command(
        'cd /var/www/miq/vmdb; tar zcvf /tmp/appliance_logs.tgz log')
    ssh_client.get_file('/tmp/appliance_logs.tgz', 'appliance_logs.tgz')
def setup_external_auth_openldap(**data):
    """Sets up the appliance for an external authentication with OpenLdap.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    appliance_obj = appliance.IPAppliance()
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = appliance_obj.address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['domain_name'])

    # Talk to the LDAP server host using its default credentials.
    ldap_ssh = SSHClient(
        username=credentials['host_default']['username'],
        password=credentials['host_default']['password'],
        hostname=data['ipaddress'])
    # updating the /etc/hosts is a workaround due to the
    # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
    ldap_ssh.run_command(
        'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn))
    ldap_ssh.get_file(remote_file=data['cert_filepath'], local_path=conf_path.strpath)
    ldap_ssh.close()

    # Flip the appliance over to external (httpd) authentication via the UI.
    ensure_browser_open()
    login_admin()
    ExternalAuthSetting(get_groups=data.pop("get_groups", True)).setup()
    appliance_obj.configure_appliance_for_openldap_ext_auth(appliance_fqdn)
    logout()
def setup_external_auth_openldap(**data):
    """Sets up the appliance for an external authentication with OpenLdap.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaddress'],
    }
    appliance_obj = appliance.IPAppliance()
    # Build a unique FQDN for this appliance under the requested domain.
    hostname = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    fqdn = '{}.{}'.format(hostname, data['domain_name'])

    server_ssh = SSHClient(**connect_kwargs)
    # updating the /etc/hosts is a workaround due to the
    # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
    server_ssh.run_command(
        'echo "{}\t{}" >> /etc/hosts'.format(appliance_obj.address, fqdn))
    server_ssh.get_file(remote_file=data['cert_filepath'], local_path=conf_path.strpath)
    server_ssh.close()

    ensure_browser_open()
    login_admin()
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", True))
    auth.setup()
    appliance_obj.configure_appliance_for_openldap_ext_auth(fqdn)
    logout()
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("address", help="hostname or ip address of target appliance") parser.add_argument("sdk_url", help="url to download sdk pkg") parser.add_argument( "--restart", help="restart evmserverd after installation " + "(required for proper operation)", action="store_true", ) args = parser.parse_args() ssh_kwargs = { "username": credentials["ssh"]["username"], "password": credentials["ssh"]["password"], "hostname": args.address, } # Init SSH client client = SSHClient(**ssh_kwargs) # start filename = args.sdk_url.split("/")[-1] foldername = os.path.splitext(filename)[0] # download print "Downloading sdk" status, out = client.run_command( "curl %(url)s -o %(file)s > /root/unzip.out 2>&1" % {"url": args.sdk_url, "file": filename} ) # extract print "Extracting sdk (" + filename + ")" status, out = client.run_command("unzip -o -f -d /var/www/miq/vmdb/lib/ %s" % filename) if status != 0: print out sys.exit(1) # install print "Installing sdk (" + foldername + ")" status, out = client.run_command( 'echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:' + "/var/www/miq/vmdb/lib/" + foldername + '/lib/linux-64" >> /etc/default/evm' ) if status != 0: print "SDK installation failure (rc:" + out + ")" print out sys.exit(1) # service evmserverd restart if args.restart: print "Appliance restart" status, out = client.run_command("service evmserverd restart") print "evmserverd restarted, the UI should start shortly." else: print "evmserverd must be restarted before netapp sdk can be used"
def setup_collectd(perf_data):
    """Prepare the local ansible inventory/ssh-config for collectd + grafana
    monitoring of a CFME appliance, then run the postdeploy playbook.

    Mutates ``perf_data`` in place: rewrites the appliance name's LATEST
    placeholder and fills in the grafana tool settings.
    """
    # Block until the appliance answers ping (polls every 5s, no timeout).
    command_str = "until ping -c1 " + str(perf_data['appliance']['ip_address']
        ) + " &>/dev/null; do sleep 5; done"
    print subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE).stdout.read()
    # Derive the public key from the local test private key and push it to the
    # appliance's authorized_keys (overwrites the whole file).
    id_pub = subprocess.Popen("ssh-keygen -y -f ~/.ssh/id_rsa_t", shell=True,
        stdout=subprocess.PIPE).stdout.read()
    commandstring = "echo \"" + str(id_pub) + "\" > ~/.ssh/authorized_keys"
    ssh_client = SSHClient()
    ssh_client.run_command(commandstring)
    # Substitute the short version string (dots stripped) for the LATEST
    # placeholder in the configured appliance name.
    version_string = get_current_version_string().replace(".", "")
    appliance_name_update = perf_data['appliance']['appliance_name'].replace(
        "LATEST", version_string)
    perf_data['appliance']['appliance_name'] = appliance_name_update
    # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
    # input; acceptable here only because data.yml is a local config file.
    stream = open("cfme-performance/conf/data.yml", "r")
    datayml = yaml.load(stream)
    perf_data['tools']['grafana']['ip_address'] = datayml['grafana']['ip']
    perf_data['tools']['grafana']['enabled'] = 'true'
    # Build the ansible inventory: monitor host plus this appliance as cfme-vmdb.
    hosts_local = "[monitorhost]\n" + str(
        perf_data['tools']['grafana']['ip_address']) + "\n\n"
    hosts_local = hosts_local + "[cfme-vmdb]\n" + perf_data['appliance'][
        'appliance_name'] + "\n\n"
    # NOTE(review): '[cfme-worker]' appears twice below — one duplicate was
    # probably meant to be a different group; confirm against the playbook.
    hosts_local = hosts_local + "[cfme-worker]\n\n[cfme-worker]\n\n[cfme-all-in-one]\n\n[rhevm]\n"
    hostfile = open("ansible/hosts.local", "w")
    hostfile.write(hosts_local)
    hostfile.close()
    # Per-host ssh options: use the test key and disable host-key checking.
    cstr = "\n\tIdentityFile ~/.ssh/id_rsa_t\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null"
    ssh_config = "Host " + perf_data['appliance'][
        'appliance_name'] + "\n\tHostname " + perf_data['appliance'][
        'ip_address'] + cstr
    ssh_config = ssh_config + "\nHost " + datayml['grafana'][
        'host'] + "\n\tHostname " + datayml['grafana']['ip'] + cstr
    #print ssh_config
    sshfile = open('ansible/ssh-config.local', 'w')
    sshfile.write(ssh_config)
    sshfile.close()
    # Clone the template appliance entry under the real appliance name, then
    # drop the template key so only this appliance remains in the vars file.
    stream = open("cfme-performance/conf/all.yml", "r")
    allstream = yaml.load(stream)
    allstream['appliances'][perf_data['appliance']['appliance_name']] = {}
    allstream['appliances'][perf_data['appliance'][
        'appliance_name']] = allstream['appliances']['CF-B2B-R0000-test']
    del allstream['appliances']['CF-B2B-R0000-test']
    with open('ansible/group_vars/all.local.yml', 'w') as outfile:
        yaml.dump(allstream, outfile, default_flow_style=False)
    # NOTE(review): Popen does not wait — this launches 'sleep 300' in the
    # background and returns immediately; if a 5-minute pause was intended
    # before the playbook run, it never actually happens. Confirm intent.
    subprocess.Popen("sleep 300", shell=True)
    print subprocess.Popen(
        "ansible-playbook -i hosts.local configure/postdeploy.yml -vvv",
        shell=True, stdout=subprocess.PIPE, cwd="ansible").stdout.read()
def fix_merkyl_workaround():
    """Workaround around merkyl not opening an iptables port for communication"""
    ssh_client = SSHClient()
    merkyl_installed = ssh_client.run_command('test -f /etc/init.d/merkyl').rc == 0
    if merkyl_installed:
        logger.info('Rudely overwriting merkyl init.d on appliance;')
        # Replace the shipped init script with our bundled one and restart.
        local_file = data_path.join("bundles").join("merkyl").join("merkyl")
        ssh_client.put_file(local_file.strpath, "/etc/init.d/merkyl")
        ssh_client.run_command("service merkyl restart")
def main(): parser = argparse.ArgumentParser( epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('db_address', help='hostname or ip address of external database') parser.add_argument('--database', default='vmdb_production', help='name of the external database') parser.add_argument('--region', default=0, type=int, help='region to assign to the new DB') parser.add_argument('--username', default=credentials['database']['username'], help='username for external database') parser.add_argument('--password', default=credentials['database']['password'], help='password for external database') args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } rbt_repl = { 'miq_lib': '/var/www/miq/lib', 'host': args.db_address, 'database': args.database, 'region': args.region, 'username': args.username, 'password': args.password } # Find and load our rb template with replacements base_path = os.path.dirname(__file__) rbt = datafile.data_path_for_filename('enable-external-db.rbt', base_path) rb = datafile.load_data_file(rbt, rbt_repl) # Init SSH client and sent rb file over to /tmp remote_file = '/tmp/%s' % generate_random_string() client = SSHClient(**ssh_kwargs) client.put_file(rb.name, remote_file) # Run the rb script, clean it up when done print 'Initializing Appliance External DB' status, out = client.run_command('ruby %s' % remote_file) client.run_command('rm %s' % remote_file) if status != 0: print 'Enabling DB failed with error:' print out sys.exit(1) else: print 'DB Enabled, evm watchdog should start the UI shortly.'
def main():
    """Deploy, enable and start the SNMP trap listener on a target appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    # Init SSH client
    client = SSHClient(
        username=credentials['ssh']['username'],
        password=credentials['ssh']['password'],
        hostname=args.address)
    snmp_path = scripts_data_path.join("snmp")

    # Copy
    print("Copying files")
    client.put_file(snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
    client.put_file(snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")

    # Enable after startup
    print("Enabling after startup")
    if client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] != 0:
        client.run_command(
            "echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
    assert client.run_command(
        "grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0, "Could not enable!"

    # Run!
    print("Starting listener")
    assert client.run_command(
        "cd /root/ && ./snmp_listen.sh start")[0] == 0, "Could not start!"

    # Open the port if not opened
    print("Opening the port in iptables")
    if client.run_command("grep '--dport 8765' /etc/sysconfig/iptables")[0] != 0:
        # append after the 5432 entry
        client.run_command(
            "sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
            "/etc/sysconfig/iptables")
        client.run_command("systemctl restart iptables")

    # Check if accessible
    try:
        requests.get("http://{}:8765/".format(args.address))
    except requests.exceptions.ConnectionError:
        print("Could not detect running listener!")
        exit(2)
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('db_address', help='hostname or ip address of external database') parser.add_argument('--database', default='vmdb_production', help='name of the external database') parser.add_argument('--region', default=0, type=int, help='region to assign to the new DB') parser.add_argument('--username', default=credentials['database']['username'], help='username for external database') parser.add_argument('--password', default=credentials['database']['password'], help='password for external database') args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } rbt_repl = { 'miq_lib': '/var/www/miq/lib', 'host': args.db_address, 'database': args.database, 'region': args.region, 'username': args.username, 'password': args.password } # Find and load our rb template with replacements base_path = os.path.dirname(__file__) rbt = datafile.data_path_for_filename( 'enable-external-db.rbt', base_path) rb = datafile.load_data_file(rbt, rbt_repl) # Init SSH client and sent rb file over to /tmp remote_file = '/tmp/%s' % generate_random_string() client = SSHClient(**ssh_kwargs) client.put_file(rb.name, remote_file) # Run the rb script, clean it up when done print 'Initializing Appliance External DB' status, out = client.run_command('ruby %s' % remote_file) client.run_command('rm %s' % remote_file) if status != 0: print 'Enabling DB failed with error:' print out sys.exit(1) else: print 'DB Enabled, evm watchdog should start the UI shortly.'
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance') parser.add_argument('sdk_url', help='url to download sdk pkg') parser.add_argument('--restart', help='restart evmserverd after installation ' + '(required for proper operation)', action="store_true") args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], 'hostname': args.address } # Init SSH client client = SSHClient(**ssh_kwargs) # start filename = args.sdk_url.split('/')[-1] foldername = os.path.splitext(filename)[0] # download print 'Downloading sdk' status, out = client.run_command('curl %(url)s -o %(file)s > /root/unzip.out 2>&1' % {'url': args.sdk_url, 'file': filename}) # extract print 'Extracting sdk (' + filename + ')' status, out = client.run_command('unzip -o -f -d /var/www/miq/vmdb/lib/ %s' % filename) if status != 0: print out sys.exit(1) # install print 'Installing sdk (' + foldername + ')' status, out = client.run_command('echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:' + '/var/www/miq/vmdb/lib/' + foldername + '/lib/linux-64" >> /etc/default/evm') if status != 0: print 'SDK installation failure (rc:' + out + ')' print out sys.exit(1) # service evmserverd restart if args.restart: print 'Appliance restart' status, out = client.run_command('service evmserverd restart') print 'evmserverd restarted, the UI should start shortly.' else: print 'evmserverd must be restarted before netapp sdk can be used'
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request): """Tests revert snapshot Metadata: test_flag: snapshot, provision """ snapshot1 = new_snapshot(test_vm) ip = snapshot1.vm.provider_crud.get_mgmt_system().get_ip_address( snapshot1.vm.name) print ip ssh_kwargs = { 'username': credentials[provider.data['full_template']['creds']]['username'], 'password': credentials[provider.data['full_template']['creds']]['password'], 'hostname': ip } ssh = SSHClient(**ssh_kwargs) ssh.run_command('touch snapshot1.txt') snapshot1.create() ssh.run_command('touch snapshot2.txt') snapshot2 = new_snapshot(test_vm) snapshot2.create() snapshot1.revert_to() # Wait for the snapshot to become active logger.info('Waiting for vm %s to become active', snapshot1.name) wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh) test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout=720) register_event(test_vm.provider_crud.get_yaml_data()['type'], "vm", test_vm.name, ["vm_power_on_req", "vm_power_on"]) test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False) pytest.sel.force_navigate('infrastructure_provider', context={'provider': test_vm.provider_crud}) test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout=900) soft_assert(test_vm.find_quadicon().state == 'currentstate-on') soft_assert( test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name), "vm not running") client = SSHClient(**ssh_kwargs) request.addfinalizer(test_vm.delete_from_provider) try: wait_for(lambda: client.run_command('test -e snapshot2.txt')[1] == 0, fail_condition=False) logger.info('Revert to snapshot %s successful', snapshot1.name) except: logger.info('Revert to snapshot %s Failed', snapshot1.name)
def disable_external_auth_openldap():
    """Revert the appliance from OpenLDAP external auth back to database auth."""
    auth = DatabaseAuthSetting()
    auth.update()
    sssd_conf = '/etc/sssd/sssd.conf'
    httpd_auth = '/etc/pam.d/httpd-auth'
    # NOTE(review): this value looks like a redacted file path — confirm the
    # real remote-user conf path before relying on its removal.
    manageiq_remoteuser = '******'
    manageiq_ext_auth = '/etc/httpd/conf.d/manageiq-external-auth.conf'
    command = 'rm -rf {} && rm -rf {} && rm -rf {} && rm -rf {}'.format(
        sssd_conf, httpd_auth, manageiq_ext_auth, manageiq_remoteuser)
    ssh = SSHClient()
    # BUG FIX: run_command returns a (status, output) pair, which is always
    # truthy, so the old `assert ssh.run_command(command)` could never fail.
    # Unpack it and check the exit status instead.
    rc, out = ssh.run_command(command)
    assert rc == 0, out
    ssh.run_command('systemctl restart evmserverd')
    appliance.IPAppliance().wait_for_web_ui()
    logout()
def main():
    """Install, enable and start the SNMP trap listener on a target appliance."""
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }

    # Init SSH client
    conn = SSHClient(**ssh_kwargs)
    snmp_path = scripts_data_path.join("snmp")

    # Copy
    print("Copying files")
    conn.put_file(snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
    conn.put_file(snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")

    # Enable after startup
    print("Enabling after startup")
    already_enabled = conn.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0
    if not already_enabled:
        conn.run_command("echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
    assert conn.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0, "Could not enable!"

    # Run!
    print("Starting listener")
    assert conn.run_command("cd /root/ && ./snmp_listen.sh start")[0] == 0, "Could not start!"

    # Open the port if not opened
    print("Opening the port in iptables")
    port_open = conn.run_command("grep '--dport 8765' /etc/sysconfig/iptables")[0] == 0
    if not port_open:
        # append after the 5432 entry
        conn.run_command(
            "sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
            "/etc/sysconfig/iptables"
        )
        conn.run_command("service iptables restart")

    # Check if accessible
    try:
        requests.get("http://{}:8765/".format(args.address))
    except requests.exceptions.ConnectionError:
        print("Could not detect running listener!")
        exit(2)
def disable_external_auth_openldap():
    """Switch the appliance from OpenLDAP external auth back to database auth
    and clean the auth configuration files off the appliance."""
    auth = DatabaseAuthSetting()
    auth.update()
    sssd_conf = '/etc/sssd/sssd.conf'
    httpd_auth = '/etc/pam.d/httpd-auth'
    # NOTE(review): this value looks like a redacted file path — confirm the
    # real remote-user conf path before relying on its removal.
    manageiq_remoteuser = '******'
    manageiq_ext_auth = '/etc/httpd/conf.d/manageiq-external-auth.conf'
    command = 'rm -rf {} && rm -rf {} && rm -rf {} && rm -rf {}'.format(
        sssd_conf, httpd_auth, manageiq_ext_auth, manageiq_remoteuser)
    ssh = SSHClient()
    # BUG FIX: run_command returns a (status, output) pair, which is always
    # truthy, so the old `assert ssh.run_command(command)` could never fail.
    # Unpack it and check the exit status instead.
    rc, out = ssh.run_command(command)
    assert rc == 0, out
    ssh.run_command('systemctl restart evmserverd')
    appliance.IPAppliance().wait_for_web_ui()
    logout()
def disable_forgery_protection():
    """Turn off Rails allow_forgery_protection around the yield, restoring it after."""
    t0 = time.time()
    conn = SSHClient()
    logger.info('Turning off "allow_forgery_protection"')
    conn.run_command(
        "sed -i \'s/allow_forgery_protection = true/allow_forgery_protection = false/\' "
        "/var/www/miq/vmdb/config/environments/production.rb")
    conn.run_command("service evmserverd restart")
    conn.close()
    logger.info('Turned off "allow_forgery_protection" in: {}'.format(time.time() - t0))

    yield

    # Restore the original setting and bounce the server again.
    t0 = time.time()
    conn = SSHClient()
    logger.info('Turning on "allow_forgery_protection"')
    conn.run_command(
        "sed -i \'s/allow_forgery_protection = false/allow_forgery_protection = true/\' "
        "/var/www/miq/vmdb/config/environments/production.rb")
    conn.run_command("service evmserverd restart")
    conn.close()
    logger.info('Turned on "allow_forgery_protection" in: {}'.format(time.time() - t0))
def disable_forgery_protection():
    """Disable CSRF forgery protection for the duration of the test run,
    re-enabling it (with an evmserverd restart) afterwards."""
    begin = time.time()
    client = SSHClient()
    logger.info('Turning off "allow_forgery_protection"')
    client.run_command(
        "sed -i \'s/allow_forgery_protection = true/allow_forgery_protection = false/\' "
        "/var/www/miq/vmdb/config/environments/production.rb")
    client.run_command("service evmserverd restart")
    client.close()
    elapsed = time.time() - begin
    logger.info(
        'Turned off "allow_forgery_protection" in: {}'.format(elapsed))

    yield

    begin = time.time()
    client = SSHClient()
    logger.info('Turning on "allow_forgery_protection"')
    client.run_command(
        "sed -i \'s/allow_forgery_protection = false/allow_forgery_protection = true/\' "
        "/var/www/miq/vmdb/config/environments/production.rb")
    client.run_command("service evmserverd restart")
    client.close()
    elapsed = time.time() - begin
    logger.info('Turned on "allow_forgery_protection" in: {}'.format(elapsed))
def main(): parser = argparse.ArgumentParser( epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('hostname', nargs='?', default=None, help='hostname or ip address of target appliance') parser.add_argument('source', nargs='?', default='ManageIQ', help='Source Domain name') parser.add_argument('dest', nargs='?', default='Default', help='Destination Domain name') parser.add_argument('username', nargs='?', default=credentials['ssh']['username'], help='SSH username for target appliance') parser.add_argument('password', nargs='?', default=credentials['ssh']['password'], help='SSH password for target appliance') args = parser.parse_args() ssh_kwargs = {'username': args.username, 'password': args.password} if args.hostname is not None: ssh_kwargs['hostname'] = args.hostname client = SSHClient(stream_output=True, **ssh_kwargs) # Make sure the working dir exists client.run_command('mkdir -p /tmp/miq') print 'Exporting domain...' export_opts = 'DOMAIN={} EXPORT_DIR=/tmp/miq PREVIEW=false OVERWRITE=true'.format( args.source) export_cmd = 'evm:automate:export {}'.format(export_opts) print export_cmd client.run_rake_command(export_cmd) ro_fix_cmd = "sed -i 's/system: true/system: false/g' /tmp/miq/ManageIQ/__domain__.yaml" client.run_command(ro_fix_cmd) import_opts = 'DOMAIN={} IMPORT_DIR=/tmp/miq PREVIEW=false'.format( args.source) import_opts += ' OVERWRITE=true IMPORT_AS={}'.format(args.dest) import_cmd = 'evm:automate:import {}'.format(import_opts) print import_cmd client.run_rake_command(import_cmd)
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    # The appliance clock must agree with the IPA server, so add it as an NTP
    # source and give the change time to take effect.
    if data["ipaserver"] not in get_ntp_servers():
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
    auth.setup()
    logout()
    # Merge in the principal/password from the named credentials entry.
    data.update(**credentials.get(data.pop("credentials"), {}))
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data))
    assert rc == 0, out
    assert "failed" not in out.lower(), "External auth setup failed:\n{}".format(out)
    login_admin()
def get_appliance(provider):
    '''Fixture to provision appliance to the provider being tested if necessary'''
    global appliance_list
    global appliance_vm_name
    if provider not in appliance_list:
        # Reuse the appliance behind base_url when it already lives on this
        # provider; otherwise provision a fresh one.
        if ('appliances_provider' not in cfme_data['basic_info'].keys() or
                provider != cfme_data['basic_info']['appliances_provider']):
            appliance_list[provider] = provision_appliance(provider)
        else:
            appliance_list[provider] = re.findall(
                r'[0-9]+(?:\.[0-9]+){3}', conf.env['base_url'])[0]

        prov_data = cfme_data['management_systems'][provider]
        if prov_data['type'] == 'virtualcenter':
            # ssh in and see if vddk already present, if not, install
            client = SSHClient(
                username=conf.credentials['ssh']['username'],
                password=conf.credentials['ssh']['password'],
                hostname=appliance_list[provider])
            vix_libs = int(client.run_command("ldconfig -p | grep vix | wc -l")[1])
            if vix_libs < 1:
                install_vddk(appliance_list[provider])
            client.close()
        elif prov_data['type'] == 'rhevm':
            add_rhev_direct_lun_disk(provider, appliance_vm_name)
    return appliance_list[provider]
def get_appliance(provider):
    '''Fixture to provision appliance to the provider being tested if necessary'''
    global appliance_list
    global appliance_vm_name
    if provider in appliance_list:
        return appliance_list[provider]

    basic_info = cfme_data['basic_info']
    if ('appliances_provider' not in basic_info.keys() or
            provider != basic_info['appliances_provider']):
        appliance_list[provider] = provision_appliance(provider)
    else:
        # Reuse the appliance already running behind base_url.
        appliance_list[provider] = re.findall(r'[0-9]+(?:\.[0-9]+){3}',
                                              conf.env['base_url'])[0]

    prov_data = cfme_data['management_systems'][provider]
    if prov_data['type'] == 'virtualcenter':
        # ssh in and see if vddk already present, if not, install
        ssh_kwargs = {
            'username': conf.credentials['ssh']['username'],
            'password': conf.credentials['ssh']['password'],
            'hostname': appliance_list[provider]
        }
        client = SSHClient(**ssh_kwargs)
        if int(client.run_command("ldconfig -p | grep vix | wc -l")[1]) < 1:
            install_vddk(appliance_list[provider])
        client.close()
    elif prov_data['type'] == 'rhevm':
        add_rhev_direct_lun_disk(provider, appliance_vm_name)
    return appliance_list[provider]
def get_appliance(provider):
    """Fixture to provision appliance to the provider being tested if necessary"""
    global appliance_list
    global appliance_vm_name
    if provider not in appliance_list:
        provider_is_default = (
            "appliances_provider" in cfme_data["basic_info"].keys()
            and provider == cfme_data["basic_info"]["appliances_provider"]
        )
        if provider_is_default:
            # Reuse the already-deployed appliance referenced by base_url.
            appliance_list[provider] = re.findall(r"[0-9]+(?:\.[0-9]+){3}", conf.env["base_url"])[0]
        else:
            appliance_list[provider] = provision_appliance(provider)

        prov_data = cfme_data["management_systems"][provider]
        if prov_data["type"] == "virtualcenter":
            # ssh in and see if vddk already present, if not, install
            client = SSHClient(
                username=conf.credentials["ssh"]["username"],
                password=conf.credentials["ssh"]["password"],
                hostname=appliance_list[provider],
            )
            vix_count = int(client.run_command("ldconfig -p | grep vix | wc -l")[1])
            if vix_count < 1:
                install_vddk(appliance_list[provider])
            client.close()
        elif prov_data["type"] == "rhevm":
            add_rhev_direct_lun_disk(provider, appliance_vm_name)
    return appliance_list[provider]
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance', nargs='?', default=None) args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], } if args.address: ssh_kwargs['hostname'] = args.address # Init SSH client ssh_client = SSHClient(**ssh_kwargs) # compile assets if required (not required on 5.2) if not ssh_client.get_version().startswith("5.2"): if ssh_client.run_command("ls /var/www/miq/vmdb/public/assets")[0] != 0: ssh_client.run_rake_command("assets:precompile") ssh_client.run_rake_command("evm:restart") print "CFME UI worker restarted, UI should be available shortly"
def get_vm_config_modified_time(name, vm_name, datastore_url, provider_key):
    """Return the naive local-time mtime of the VM's .vmx file, or False on any error."""
    try:
        mgmt_systems = cfme_data.get("management_systems", {})
        hosts = mgmt_systems[provider_key]['hosts']
        creds_key = mgmt_systems[provider_key].get('host_credentials', 'host_default')
        # Prefer a configured host whose name matches; otherwise fall back to
        # any dotted-quad IP embedded in `name` itself.
        matches = [host['name'] for host in hosts if name in host['name']]
        if not matches:
            matches = re.findall(r'[0-9]+(?:\.[0-9]+){3}', name)
        # Strip the datastore URL's scheme prefix to get a relative path.
        datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
        esx_ssh = SSHClient(
            username=credentials[creds_key]['username'],
            password=credentials[creds_key]['password'],
            hostname=matches[0])
        command = 'find ~/{}/{} -name {} | xargs date -r'.format(
            datastore_path[0], str(vm_name), str(vm_name) + '.vmx')
        exit_status, output = esx_ssh.run_command(command)
        esx_ssh.close()
        stamp = parser.parse(output.rstrip())
        stamp = stamp.astimezone(pytz.timezone(str(get_localzone())))
        return stamp.replace(tzinfo=None)
    except Exception as e:
        logger.error(e)
        return False
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    # Keep appliance time in sync with the IPA server before enrolling.
    if data["ipaserver"] not in get_ntp_servers():
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    ExternalAuthSetting(get_groups=data.pop("get_groups", False)).setup()
    logout()
    creds = credentials.get(data.pop("credentials"), {})
    data.update(**creds)
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data)
    )
    assert rc == 0, out
    assert "failed" not in out.lower(), "External auth setup failed:\n{}".format(out)
    login_admin()
def get_vm_config_modified_time(name, vm_name, datastore_url, provider_key):
    """Look up when the VM's .vmx file was last modified on its ESX host.

    Returns a naive datetime in the local timezone, or False on any failure.
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        provider_entry = providers_data[provider_key]
        host_creds = provider_entry.get('host_credentials', 'host_default')
        # Match a configured host by name, falling back to an IP found in `name`.
        hostname = [host['name'] for host in provider_entry['hosts']
                    if name in host['name']]
        if not hostname:
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', name)
        connect_kwargs = {
            'username': credentials[host_creds]['username'],
            'password': credentials[host_creds]['password'],
            'hostname': hostname[0]
        }
        # Drop the datastore URL scheme to build a home-relative path.
        datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
        ssh_client = SSHClient(**connect_kwargs)
        exit_status, output = ssh_client.run_command(
            'find ~/{}/{} -name {} | xargs date -r'.format(
                datastore_path[0], str(vm_name), str(vm_name) + '.vmx'))
        ssh_client.close()
        modified_time = parser.parse(output.rstrip())
        modified_time = modified_time.astimezone(pytz.timezone(str(get_localzone())))
        return modified_time.replace(tzinfo=None)
    except Exception as e:
        logger.error(e)
        return False
def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Checks the availability of a port from outside using another machine (over SSH)"""
    from utils.ssh import SSHClient
    port = int(port)
    if not addr:
        addr = my_ip_address()
    # Serve from the cache unless forced or never probed.
    if port in _ports[addr] and not force:
        return _ports[addr][port]

    if not machine_addr:
        machine_addr = urlparse.urlparse(store.base_url).hostname
    if ssh_creds:
        ssh = SSHClient(hostname=machine_addr,
                        username=ssh_creds['username'],
                        password=ssh_creds['password'])
    else:
        ssh = SSHClient(hostname=machine_addr)
    with ssh:
        # on exception => fails with return code 1
        cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
        ret, out = ssh.run_command(cmd)
        _ports[addr][port] = (ret == 0)
    return _ports[addr][port]
def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Checks the availability of a port from outside using another machine (over SSH)"""
    from utils.ssh import SSHClient
    port = int(port)
    if not addr:
        addr = my_ip_address()
    if port not in _ports[addr] or force:
        if not machine_addr:
            machine_addr = urlparse.urlparse(store.base_url).hostname
        # Reuse the appliance's ssh client unless explicit creds were given.
        if not ssh_creds:
            ssh = store.current_appliance.ssh_client
        else:
            ssh = SSHClient(hostname=machine_addr,
                            username=ssh_creds['username'],
                            password=ssh_creds['password'])
        with ssh:
            # on exception => fails with return code 1
            cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
            ret, out = ssh.run_command(cmd)
            _ports[addr][port] = (ret == 0)
    return _ports[addr][port]
def main():
    """Apply RHEL updates to an appliance over SSH, optionally rebooting after.

    CLI args: appliance address, yum repo base URL, and an optional --reboot flag.
    Exits 1 if `yum update` fails.  (Python 2 script — uses print statements.)
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument('repo_url', help='updates base url')
    parser.add_argument('--reboot', help='reboot after installation ' +
                        '(required for proper operation)', action="store_true")
    args = parser.parse_args()
    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }

    # Init SSH client
    client = SSHClient(**ssh_kwargs)

    # create repo file contents (yum .repo format; gpgcheck disabled for test repos)
    repo_file = "[rhel-updates]\nname=rhel6-updates\nbaseurl=" + \
        args.repo_url + "\nenabled=1\ngpgcheck=0"

    # create repo file on appliance
    print 'Create update repo file'
    status, out = client.run_command(
        'echo "%s" >/etc/yum.repos.d/rhel_updates.repo' % repo_file)

    # update
    print 'Running rhel updates...'
    status, out = client.run_command('yum update -y --nogpgcheck')
    print "\n" + out + "\n"
    if status != 0:
        print "ERROR during update"
        sys.exit(1)

    # reboot (recommended so the updated packages take effect)
    if args.reboot:
        print 'Appliance reboot'
        status, out = client.run_command('reboot')
    else:
        print 'A reboot is recommended.'
def disable_external_auth_ipa():
    """Unconfigure external auth."""
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    # Switch the UI back to database authentication first.
    DatabaseAuthSetting().update()
    # Then remove the IPA client configuration from the appliance.
    rc, out = ssh.run_command("appliance_console_cli --uninstall-ipa")
    assert rc == 0, out
def disable_external_auth_ipa():
    """Unconfigure external auth."""
    client = SSHClient()
    ensure_browser_open()
    login_admin()
    db_auth = DatabaseAuthSetting()
    db_auth.update()
    exit_code, output = client.run_command("appliance_console_cli --uninstall-ipa")
    # Fail loudly with the command output if the uninstall did not succeed.
    assert exit_code == 0, output
def test_verify_revert_snapshot(test_vm, provider_key, provider_type, provider_data,
                                soft_assert, register_event, request):
    """Tests revert snapshot

    Creates a marker file, snapshots, creates a second marker file, snapshots
    again, reverts to the first snapshot and verifies (via SSH) that the second
    marker file is gone.

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider_crud.get_mgmt_system().get_ip_address(snapshot1.vm.name)
    # was a bare py2 `print ip` debug statement; log it instead
    logger.info('Full template VM ip address: %s', ip)
    ssh_kwargs = {
        'username': credentials[provider_data['full_template']['creds']]['username'],
        'password': credentials[provider_data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh = SSHClient(**ssh_kwargs)
    ssh.run_command('touch snapshot1.txt')
    snapshot1.create()
    ssh.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout=720)
    register_event(
        test_vm.provider_crud.get_yaml_data()['type'],
        "vm", test_vm.name, ["vm_power_on_req", "vm_power_on"])
    test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False)
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider_crud})
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(
        test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
        "vm not running")
    client = SSHClient(**ssh_kwargs)
    request.addfinalizer(test_vm.delete_from_provider)
    try:
        wait_for(lambda: client.run_command('test -e snapshot2.txt')[1] == 0,
                 fail_condition=False)
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit
        logger.info('Revert to snapshot %s Failed', snapshot1.name)
def disable_external_auth_ipa():
    """Unconfigure external auth.

    Reverts the appliance to database authentication, uninstalls the IPA
    client via appliance_console_cli, then waits for the web UI to return.
    """
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    auth = DatabaseAuthSetting()
    auth.update()
    # BUG FIX: `assert ssh.run_command(...)` asserted on the returned
    # (rc, out) tuple, which is always truthy, so failures were never caught.
    # Check the exit status explicitly (matches the other variants of this
    # helper elsewhere in the file).
    rc, out = ssh.run_command("appliance_console_cli --uninstall-ipa")
    assert rc == 0, out
    appliance.IPAppliance().wait_for_web_ui()
    logout()
def disable_external_auth_ipa():
    """Unconfigure external auth.

    Reverts the appliance to database authentication, uninstalls the IPA
    client via appliance_console_cli, then waits for the web UI to return.
    """
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    auth = DatabaseAuthSetting()
    auth.update()
    # BUG FIX: run_command returns an (rc, out) tuple which is always truthy,
    # so the original `assert ssh.run_command(...)` could never fail.
    # Assert on the exit status instead.
    rc, out = ssh.run_command("appliance_console_cli --uninstall-ipa")
    assert rc == 0, out
    appliance.IPAppliance().wait_for_web_ui()
    logout()
def main():
    """Apply RHEL updates to an appliance over SSH, optionally rebooting after.

    CLI args: appliance address, yum repo base URL, and an optional --reboot flag.
    Exits 1 if `yum update` fails.  (Python 2 script — uses print statements.)
    """
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument('repo_url', help='updates base url')
    parser.add_argument('--reboot', help='reboot after installation ' +
        '(required for proper operation)', action="store_true")
    args = parser.parse_args()
    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }

    # Init SSH client
    client = SSHClient(**ssh_kwargs)

    # create repo file contents (yum .repo format; gpgcheck disabled for test repos)
    repo_file = "[rhel-updates]\nname=rhel6-updates\nbaseurl=" + args.repo_url + "\nenabled=1\ngpgcheck=0"

    # create repo file on appliance
    print 'Create update repo file'
    status, out = client.run_command('echo "%s" >/etc/yum.repos.d/rhel_updates.repo' % repo_file)

    # update
    print 'Running rhel updates...'
    status, out = client.run_command('yum update -y --nogpgcheck')
    print "\n" + out + "\n"
    if status != 0:
        print "ERROR during update"
        sys.exit(1)

    # reboot (recommended so the updated packages take effect)
    if args.reboot:
        print 'Appliance reboot'
        status, out = client.run_command('reboot')
    else:
        print 'A reboot is recommended.'
def main():
    """Reset the appliance database over SSH, then wait for the UI to return."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    # All three positionals are optional; credentials default from the config.
    for opt, default, desc in (
            ('hostname', None, 'hostname or ip address of target appliance'),
            ('username', credentials['ssh']['username'], 'SSH username for target appliance'),
            ('password', credentials['ssh']['password'], 'SSH password for target appliance')):
        parser.add_argument(opt, nargs='?', default=default, help=desc)
    opts = parser.parse_args()

    conn_kwargs = {'username': opts.username, 'password': opts.password}
    if opts.hostname is not None:
        conn_kwargs['hostname'] = opts.hostname
    client = SSHClient(stream_output=True, **conn_kwargs)

    # `service evmserverd stop` is a little slow, and we're destroying the
    # db, so rudely killing ruby speeds things up significantly
    print('Stopping ruby processes...')
    client.run_command('killall ruby')
    client.run_rake_command('evm:db:reset')
    client.run_command('service evmserverd start')

    print('Waiting for appliance UI...')
    # SSHClient has the smarts to get our hostname if none was provided.
    # Soon, utils.appliance.Appliance will be able to do all of this
    # and this will be made good.
    wait_cmd = [
        scripts_path.join('wait_for_appliance_ui.py').strpath,
        'http://%s' % client._connect_kwargs['hostname'],
    ]
    return subprocess.call(wait_cmd)
def main():
    """Apply RHEL updates to an appliance over SSH, optionally rebooting after.

    CLI args: appliance address, yum repo base URL, and an optional --reboot flag.
    Exits 1 if `yum update` fails.  (Python 2 script — uses print statements.)
    """
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("address", help="hostname or ip address of target appliance")
    parser.add_argument("repo_url", help="updates base url")
    parser.add_argument(
        "--reboot", help="reboot after installation " + "(required for proper operation)",
        action="store_true"
    )
    args = parser.parse_args()
    ssh_kwargs = {
        "username": credentials["ssh"]["username"],
        "password": credentials["ssh"]["password"],
        "hostname": args.address,
    }

    # Init SSH client
    client = SSHClient(**ssh_kwargs)

    # create repo file contents (yum .repo format; gpgcheck disabled for test repos)
    repo_file = "[rhel-updates]\nname=rhel6-updates\nbaseurl=" + args.repo_url + "\nenabled=1\ngpgcheck=0"

    # create repo file on appliance
    print "Create update repo file"
    status, out = client.run_command('echo "%s" >/etc/yum.repos.d/rhel_updates.repo' % repo_file)

    # update
    print "Running rhel updates..."
    status, out = client.run_command("yum update -y --nogpgcheck")
    print "\n" + out + "\n"
    if status != 0:
        print "ERROR during update"
        sys.exit(1)

    # reboot (recommended so the updated packages take effect)
    if args.reboot:
        print "Appliance reboot"
        status, out = client.run_command("reboot")
    else:
        print "A reboot is recommended."
def main():
    """Clone an automate domain on an appliance (export, make writable, re-import).

    Exports `source` domain to /tmp/miq via rake, flips its `system: true`
    flag so the copy is editable, then imports it as `dest`.
    (Python 2 script — uses print statements.)
    """
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument('source', nargs='?', default='ManageIQ',
        help='Source Domain name')
    parser.add_argument('dest', nargs='?', default='Default',
        help='Destination Domain name')
    parser.add_argument('username', nargs='?', default=credentials['ssh']['username'],
        help='SSH username for target appliance')
    parser.add_argument('password', nargs='?', default=credentials['ssh']['password'],
        help='SSH password for target appliance')
    args = parser.parse_args()

    ssh_kwargs = {
        'username': args.username,
        'password': args.password
    }
    if args.hostname is not None:
        ssh_kwargs['hostname'] = args.hostname

    client = SSHClient(stream_output=True, **ssh_kwargs)

    # Make sure the working dir exists
    client.run_command('mkdir -p /tmp/miq')

    print 'Exporting domain...'
    export_opts = 'DOMAIN={} EXPORT_DIR=/tmp/miq PREVIEW=false OVERWRITE=true'.format(args.source)
    export_cmd = 'evm:automate:export {}'.format(export_opts)
    print export_cmd
    client.run_rake_command(export_cmd)

    # Flip the read-only flag in the exported domain yaml so the imported
    # copy is editable.
    ro_fix_cmd = "sed -i 's/system: true/system: false/g' /tmp/miq/ManageIQ/__domain__.yaml"
    client.run_command(ro_fix_cmd)

    import_opts = 'DOMAIN={} IMPORT_DIR=/tmp/miq PREVIEW=false'.format(args.source)
    import_opts += ' OVERWRITE=true IMPORT_AS={}'.format(args.dest)
    import_cmd = 'evm:automate:import {}'.format(import_opts)
    print import_cmd
    client.run_rake_command(import_cmd)
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request):
    """Tests revert snapshot

    Creates a marker file, snapshots, creates a second marker file, snapshots
    again, reverts to the first snapshot and verifies (via SSH) that the second
    marker file is gone.

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name)
    ssh_kwargs = {
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh = SSHClient(**ssh_kwargs)
    ssh.run_command('touch snapshot1.txt')
    snapshot1.create()
    ssh.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=720)
    register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                   event_type='request_vm_start')
    register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                   event_type='vm_start')
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False)
    navigate_to(test_vm.provider, 'Details')
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(
        test_vm.provider.mgmt.is_vm_running(test_vm.name),
        "vm not running")
    client = SSHClient(**ssh_kwargs)
    try:
        wait_for(lambda: client.run_command('test -e snapshot2.txt')[1] == 0,
                 fail_condition=False)
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit
        logger.info('Revert to snapshot %s Failed', snapshot1.name)
def test_basic_metrics(provider):
    """ Basic Metrics availability test
        This test checks that the Metrics service is up
        Curls the hawkular status page and checks if it's up """
    token = provider.credentials['token']
    username, password = token.principal, token.secret
    hostname = conf.cfme_data.get('management_systems', {})[provider.key]\
        .get('hostname', [])
    host_url = 'https://' + hostname + '/hawkular/metrics/'
    command = 'curl -X GET ' + host_url + ' --insecure'
    ssh_client = SSHClient(hostname=hostname, username=username, password=password)
    # The status page title may be spelled "Hawkular Metrics" or "Hawkular-Metrics".
    response = str(ssh_client.run_command(command))
    assert re.search("Hawkular[ -]Metrics", response)
def get_worker_pid(worker_type):
    """Obtains the pid of the first worker with the worker_type specified"""
    ssh_client = SSHClient()
    # Grab the PID column ($7) of the first matching worker line.
    cmd = ('service evmserverd status 2> /dev/null | grep -m 1 '
           '\'{}\' | awk \'{{print $7}}\''.format(worker_type))
    exit_status, out = ssh_client.run_command(cmd)
    worker_pid = str(out).strip()
    if not out:
        logger.error('Could not obtain {} PID, check evmserverd running or if specific role is'
                     ' enabled...'.format(worker_type))
        assert out
    logger.info('Obtained {} PID: {}'.format(worker_type, worker_pid))
    return worker_pid
def main():
    """Collect diagnostics from an appliance: installed RPM list and log tarball."""
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    # Init SSH client straight from the default SSH credentials.
    client = SSHClient(
        username=credentials['ssh']['username'],
        password=credentials['ssh']['password'],
        hostname=args.address)

    # generate installed rpm list and fetch it locally
    client.run_command('rpm -qa | sort > /tmp/installed_rpms.txt')
    client.get_file('/tmp/installed_rpms.txt', 'installed_rpms.txt')

    # compress the logs dir and fetch the tarball locally
    client.run_command('cd /var/www/miq/vmdb; tar zcvf /tmp/appliance_logs.tgz log')
    client.get_file('/tmp/appliance_logs.tgz', 'appliance_logs.tgz')
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request):
    """Tests revert snapshot

    Creates a marker file, snapshots, creates a second marker file, snapshots
    again, reverts to the first snapshot and verifies (via SSH) that the second
    marker file is gone.

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name)
    ssh_kwargs = {
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh = SSHClient(**ssh_kwargs)
    ssh.run_command('touch snapshot1.txt')
    snapshot1.create()
    ssh.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=720)
    register_event('VmOrTemplate', test_vm.name, ['request_vm_start', 'vm_start'])
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False)
    navigate_to(test_vm.provider, 'Details')
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(
        test_vm.provider.mgmt.is_vm_running(test_vm.name),
        "vm not running")
    client = SSHClient(**ssh_kwargs)
    try:
        wait_for(lambda: client.run_command('test -e snapshot2.txt')[1] == 0,
                 fail_condition=False)
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit
        logger.info('Revert to snapshot %s Failed', snapshot1.name)
def main():
    """Reset the appliance database over SSH, then wait for the UI to return.

    (Python 2 script — uses print statements.)
    """
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument('username', nargs='?', default=credentials['ssh']['username'],
        help='SSH username for target appliance')
    parser.add_argument('password', nargs='?', default=credentials['ssh']['password'],
        help='SSH password for target appliance')
    args = parser.parse_args()

    ssh_kwargs = {
        'username': args.username,
        'password': args.password
    }
    if args.hostname is not None:
        ssh_kwargs['hostname'] = args.hostname

    client = SSHClient(stream_output=True, **ssh_kwargs)

    # `service evmserverd stop` is a little slow, and we're destroying the
    # db, so rudely killing ruby speeds things up significantly
    print 'Stopping ruby processes...'
    client.run_command('killall ruby')
    client.run_rake_command('evm:db:reset')
    client.run_command('service evmserverd start')

    print 'Waiting for appliance UI...'
    args = [
        scripts_path.join('wait_for_appliance_ui.py').strpath,
        # SSHClient has the smarts to get our hostname if none was provided
        # Soon, utils.appliance.Appliance will be able to do all of this
        # and this will be made good
        'http://%s' % client._connect_kwargs['hostname']
    ]
    return subprocess.call(args)
def get_worker_pid(worker_type):
    """Obtains the pid of the first worker with the worker_type specified"""
    client = SSHClient()
    # awk '{print $7}' pulls the PID column from the first matching line.
    status_cmd = (
        'service evmserverd status 2> /dev/null | grep -m 1 '
        '\'{}\' | awk \'{{print $7}}\''.format(worker_type))
    exit_status, out = client.run_command(status_cmd)
    worker_pid = str(out).strip()
    if out:
        logger.info('Obtained {} PID: {}'.format(worker_type, worker_pid))
    else:
        logger.error(
            'Could not obtain {} PID, check evmserverd running or if specific role is'
            ' enabled...'.format(worker_type))
    assert out
    return worker_pid
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = appliance.IPAppliance().address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    # Register the appliance's generated FQDN in the IPA server's /etc/hosts
    # (stale entries for this address are removed first).
    ipaserver_ssh = SSHClient(**connect_kwargs)
    ipaserver_ssh.run_command('cp /etc/hosts /etc/hosts_bak')
    ipaserver_ssh.run_command(
        "sed -i -r '/^{}/d' /etc/hosts".format(appliance_address))
    command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
    ipaserver_ssh.run_command(command)
    ipaserver_ssh.close()
    ssh = SSHClient()
    # BUG FIX: run_command returns an (rc, out) tuple which is always truthy,
    # so `assert ssh.run_command(...)` could never fail; assert on rc instead.
    rc, out = ssh.run_command(
        'appliance_console_cli --host {}'.format(appliance_fqdn))
    assert rc == 0, out
    ensure_browser_open()
    login_admin()
    # IPA requires synchronized clocks; point NTP at the IPA server if needed.
    if data["ipaserver"] not in get_ntp_servers():
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
    auth.setup()
    creds = credentials.get(data.pop("credentials"), {})
    data.update(**creds)
    # BUG FIX: same always-truthy tuple assert as above.
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data))
    assert rc == 0, out
    login_admin()
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = appliance.IPAppliance().address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    # Register the appliance's generated FQDN in the IPA server's /etc/hosts
    # (stale entries for this address are removed first).
    ipaserver_ssh = SSHClient(**connect_kwargs)
    ipaserver_ssh.run_command('cp /etc/hosts /etc/hosts_bak')
    ipaserver_ssh.run_command("sed -i -r '/^{}/d' /etc/hosts".format(appliance_address))
    command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
    ipaserver_ssh.run_command(command)
    ipaserver_ssh.close()
    ssh = SSHClient()
    # BUG FIX: `assert ssh.run_command(...)` asserted on the returned (rc, out)
    # tuple, which is always truthy — failures were silently ignored.
    rc, out = ssh.run_command('appliance_console_cli --host {}'.format(appliance_fqdn))
    assert rc == 0, out
    ensure_browser_open()
    login_admin()
    # IPA requires synchronized clocks; point NTP at the IPA server if needed.
    if data["ipaserver"] not in get_ntp_servers():
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
    auth.setup()
    creds = credentials.get(data.pop("credentials"), {})
    data.update(**creds)
    # BUG FIX: same always-truthy tuple assert as above.
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data)
    )
    assert rc == 0, out
    login_admin()
class OcpCli(object):
    """This class provides CLI functionality for Openshift provider.

    Connects over SSH using provider credentials when available, otherwise
    falls back to the system's known-hosts keys. Each command and a truncated
    copy of its output are logged.
    """

    def __init__(self, provider):
        provider_cfme_data = provider.get_yaml_data()
        self.hostname = provider_cfme_data['hostname']
        creds = conf.configuration.yaycl_config.credentials
        if hasattr(creds, provider.key):
            prov_creds = getattr(creds, provider.key)
            self.username = prov_creds.username
            self.password = prov_creds.password
            self.ssh_client = SSHClient(hostname=self.hostname,
                                        username=self.username,
                                        password=self.password)
        else:
            # Try with known hosts
            self.ssh_client = SSHClient()
            self.ssh_client.load_system_host_keys()
            self.ssh_client.connect(self.hostname)
        # Monotonic counter used to correlate log lines per command.
        self._command_counter = 0
        # Maximum length of command output reproduced in the log.
        self.log_line_limit = 500

    def run_command(self, *args, **kwargs):
        """Run a command over SSH, log (truncated) output, and return the result."""
        logger.info('{} - Running SSH Command#{} : {}'.format(
            self.hostname, self._command_counter, args[0]))
        results = self.ssh_client.run_command(*args, **kwargs)
        # BUG FIX: the original sliced with max(limit, len(results)), which
        # never truncates (the slice always covers the whole output); min()
        # caps the logged output at log_line_limit as intended.
        results_short = results[:min(self.log_line_limit, len(results))]
        if results.success:
            logger.info('{} - Command#{} - Succeed: {}'.format(
                self.hostname, self._command_counter, results_short))
        else:
            logger.warning('{} - Command#{} - Failed: {}'.format(
                self.hostname, self._command_counter, results_short))
        self._command_counter += 1
        return results

    def close(self):
        """Close the underlying SSH connection."""
        self.ssh_client.close()
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    import fauxfactory
    # BUG FIX: the original used 'cfmeappliance'.format(...) — no {} placeholder,
    # so the random suffix was silently discarded and every appliance got the
    # same name (cf. the other variants of this helper in this file).
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = appliance.IPAppliance().address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    ipaserver_ssh = SSHClient(**connect_kwargs)
    # updating the /etc/hosts is a workaround due to the
    # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
    command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
    ipaserver_ssh.run_command(command)
    ipaserver_ssh.close()
    ssh = SSHClient()
    rc, out = ssh.run_command('appliance_console_cli --host {}'.format(appliance_fqdn))
    assert rc == 0, out
    ssh.run_command('echo "127.0.0.1\t{}" > /etc/hosts'.format(appliance_fqdn))
    ensure_browser_open()
    login_admin()
    # IPA requires synchronized clocks; point NTP at the IPA server if needed.
    if data["ipaserver"] not in get_ntp_servers():
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
    auth.setup()
    logout()
    creds = credentials.get(data.pop("credentials"), {})
    data.update(**creds)
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data)
    )
    assert rc == 0, out
    assert "failed" not in out.lower(), "External auth setup failed:\n{}".format(out)
    login_admin()
def list_orphaned_files_per_host(host_name, host_datastore_urls, provider_key,
                                 vm_registered_files, unregistered_files):
    """List datastore files on a host that belong to no registered VM.

    Args:
        host_name: Host name (or a string containing its IP).
        host_datastore_urls: Datastore URLs to scan on that host.
        provider_key: Key into ``cfme_data['management_systems']``.
        vm_registered_files: Files known to belong to registered VMs (skipped).
        unregistered_files: Accumulator list — orphaned paths are appended
            (mutated in place) and printed as a tab-separated row.

    Returns:
        ``False`` on any error (logged); otherwise ``None``.
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        hostname = [host['name'] for host in hosts if host_name in host['name']]
        # check if hostname returned is ipaddress
        if not hostname:
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', host_name)
        connect_kwargs = {
            'username': credentials['host_default']['username'],
            'password': credentials['host_default']['password'],
            'hostname': hostname[0]
        }
        for datastore_url in host_datastore_urls:
            # Strip the "ds://"-style scheme prefix, keeping the relative path.
            datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
            ssh_client = SSHClient(**connect_kwargs)
            command = 'ls ~/{}'.format(datastore_path[0])
            exit_status, output = ssh_client.run_command(command)
            ssh_client.close()
            files_in_datastore = output.splitlines() if exit_status == 0 else []
            for fil in files_in_datastore:
                if fil not in vm_registered_files:
                    file_type = 'UNKNOWN'
                    number_of_files = 0
                    # Exit code 0 from `test -d` means `fil` is a directory.
                    command = 'test -d ~/{}/{}; echo $?'.format(datastore_path[0], fil)
                    exit_status, output = ssh_client.run_command(command)
                    ssh_client.close()
                    file_extension = re.findall(r'.*\.(\w*)', fil)
                    if file_extension:
                        file_type = file_extension[0]
                        number_of_files = 1
                    if int(output.strip()) == 0:
                        # Directory: count its files and classify it by the
                        # kind of VM artifacts it contains.
                        command = 'ls ~/{}/{} | wc -l'.format(datastore_path[0], fil)
                        exit_status, output = ssh_client.run_command(command)
                        number_of_files = output.strip()
                        command = 'find ~/{}/{} -name "*.vmx" | wc -l'.format(
                            datastore_path[0], fil)
                        vmx_status, vmx_output = ssh_client.run_command(command)
                        command = 'find ~/{}/{} -name "*.vmtx" | wc -l'.format(
                            datastore_path[0], fil)
                        vmtx_status, vmtx_output = ssh_client.run_command(command)
                        command = 'find ~/{}/{} -name "*.vmdk" | wc -l'.format(
                            datastore_path[0], fil)
                        vmdk_status, vmdk_output = ssh_client.run_command(command)
                        ssh_client.close()
                        if int(vmx_output.strip()) > 0:
                            file_type = 'VirtualMachine'
                        elif int(vmtx_output.strip()) > 0:
                            file_type = 'Template'
                        elif int(vmdk_output.strip()) > 0:
                            file_type = 'VMDK'
                            # Deletion of orphans intentionally left disabled:
                            # delete_this = '~/' + datastore_path[0] + fil
                            # command = 'rm -rf {}'.format(delete_this)
                            # exit_status, output = ssh_client.run_command(command)
                            # logger.info(output)
                    file_path = '~/' + datastore_path[0] + fil
                    if file_path not in unregistered_files:
                        unregistered_files.append(file_path)
                        print('{}\t\t{}\t\t{}\t\t{}'.format(
                            hostname[0], file_path, file_type, number_of_files))
    except Exception as e:
        logger.error(e)
        return False
def connect_direct_lun_to_appliance(self, vm_name, disconnect):
    """Connects or disconnects the direct lun disk to an appliance.

    Args:
        vm_name: Name of the VM with the appliance.
        disconnect: If False, it will connect, otherwise it will disconnect
    """
    if "provider_key" in self.kwargs:
        provider_name = self.kwargs["provider_key"]
    else:
        raise TypeError("provider_key not supplied to the provider.")
    # check that the vm exists on the rhev provider, get the ip address if so
    try:
        vm = self.api.vms.get(vm_name)
        ip_addr = self.get_ip_address(vm_name)
    except:
        raise NameError("{} not found on {}".format(vm_name, provider_name))

    # check for direct lun definition on provider's cfme_data.yaml
    if "direct_lun" not in self.kwargs:
        raise ValueError("direct_lun key not in cfme_data.yaml under provider {}, exiting...".format(provider_name))

    # does the direct lun exist
    prov_data = self.kwargs
    dlun_name = prov_data["direct_lun"]["name"]
    dlun = self.api.disks.get(dlun_name)
    if dlun is None:
        # Create the iSCSI storage connection:
        sc = params.StorageConnection()
        sc.set_address(prov_data["direct_lun"]["ip_address"])
        sc.set_type("iscsi")
        sc.set_port(int(prov_data["direct_lun"]["port"]))
        sc.set_target(prov_data["direct_lun"]["iscsi_target"])

        # Add the direct LUN disk:
        lu = params.LogicalUnit()
        lu.set_id(prov_data["direct_lun"]["iscsi_target"])
        lu.set_address(sc.get_address())
        lu.set_port(sc.get_port())
        lu.set_target(sc.get_target())
        storage = params.Storage()
        storage.set_type("iscsi")
        storage.set_logical_unit([lu])
        disk = params.Disk()
        disk.set_name(dlun_name)
        disk.set_interface("virtio")
        disk.set_type("iscsi")
        disk.set_format("raw")
        disk.set_lun_storage(storage)
        disk.set_shareable(True)
        disk = self.api.disks.add(disk)
        dlun = self.api.disks.get(dlun_name)

    # add it
    if not disconnect:
        retries = 0
        # Up to 3 attempts; each failed attempt sleeps 30s before retrying.
        while retries < 3:
            retries += 1
            direct_lun = params.Disk(id=dlun.id)
            try:
                # is the disk present and active?
                vm_disk_list = vm.get_disks().list()
                for vm_disk in vm_disk_list:
                    if vm_disk.name == dlun_name:
                        if vm_disk.active:
                            return
                        else:
                            vm_disk.activate()
                            return
                # if not present, add it and activate
                direct_lun = params.Disk(id=dlun.id)
                added_lun = vm.disks.add(direct_lun)
                added_lun.activate()
            except Exception as e:
                logger.error("Exception caught: %s" % str(e))
                if retries == 3:
                    logger.error("exhausted retries and giving up")
                    raise
                else:
                    logger.info("sleeping for 30s and retrying to connect direct lun")
                    time.sleep(30)
        # NOTE(review): a successful add does not break/return, so the loop
        # re-checks and returns on the next iteration — this pvscan appears
        # reachable only after the loop runs out; confirm intended flow.
        # Init SSH client, run pvscan on the appliance
        ssh_kwargs = {
            "username": conf.credentials["ssh"]["username"],
            "password": conf.credentials["ssh"]["password"],
            "hostname": ip_addr,
        }
        client = SSHClient(**ssh_kwargs)
        status, out = client.run_command("pvscan", timeout=5 * 60)

    # remove it
    else:
        vm_dlun = vm.disks.get(name=dlun_name)
        if vm_dlun is None:
            return
        else:
            detach = params.Action(detach=True)
            vm_dlun.delete(action=detach)
def connect_direct_lun_to_appliance(self, vm_name, disconnect):
    """Connects or disconnects the direct lun disk to an appliance.

    Args:
        vm_name: Name of the VM with the appliance.
        disconnect: If False, it will connect, otherwise it will disconnect
    """
    if "provider_key" in self.kwargs:
        provider_name = self.kwargs["provider_key"]
    else:
        raise TypeError("provider_key not supplied to the provider.")
    # check that the vm exists on the rhev provider, get the ip address if so
    try:
        vm = self.api.vms.get(vm_name)
        ip_addr = self.get_ip_address(vm_name)
    except:
        raise NameError("{} not found on {}".format(
            vm_name, provider_name))

    # check for direct lun definition on provider's cfme_data.yaml
    if 'direct_lun' not in self.kwargs:
        raise ValueError(
            "direct_lun key not in cfme_data.yaml under provider {}, exiting..."
            .format(provider_name))

    # does the direct lun exist
    prov_data = self.kwargs
    dlun_name = prov_data['direct_lun']['name']
    dlun = self.api.disks.get(dlun_name)
    if dlun is None:
        # Create the iSCSI storage connection:
        sc = params.StorageConnection()
        sc.set_address(prov_data['direct_lun']['ip_address'])
        sc.set_type("iscsi")
        sc.set_port(int(prov_data['direct_lun']['port']))
        sc.set_target(prov_data['direct_lun']['iscsi_target'])

        # Add the direct LUN disk:
        lu = params.LogicalUnit()
        lu.set_id(prov_data['direct_lun']['iscsi_target'])
        lu.set_address(sc.get_address())
        lu.set_port(sc.get_port())
        lu.set_target(sc.get_target())
        storage = params.Storage()
        storage.set_type("iscsi")
        storage.set_logical_unit([lu])
        disk = params.Disk()
        disk.set_name(dlun_name)
        disk.set_interface("virtio")
        disk.set_type("iscsi")
        disk.set_format("raw")
        disk.set_lun_storage(storage)
        disk.set_shareable(True)
        disk = self.api.disks.add(disk)
        dlun = self.api.disks.get(dlun_name)

    # add it
    if not disconnect:
        retries = 0
        # Up to 3 attempts; each failed attempt sleeps 30s before retrying.
        while retries < 3:
            retries += 1
            direct_lun = params.Disk(id=dlun.id)
            try:
                # is the disk present and active?
                vm_disk_list = vm.get_disks().list()
                for vm_disk in vm_disk_list:
                    if vm_disk.name == dlun_name:
                        if vm_disk.active:
                            return
                        else:
                            vm_disk.activate()
                            return
                # if not present, add it and activate
                direct_lun = params.Disk(id=dlun.id)
                added_lun = vm.disks.add(direct_lun)
                added_lun.activate()
            except Exception as e:
                logger.error("Exception caught: %s", str(e))
                if retries == 3:
                    logger.error("exhausted retries and giving up")
                    raise
                else:
                    logger.info(
                        "sleeping for 30s and retrying to connect direct lun"
                    )
                    time.sleep(30)
        # NOTE(review): a successful add does not break/return, so the loop
        # re-checks and returns on the next iteration — this pvscan appears
        # reachable only after the loop runs out; confirm intended flow.
        # Init SSH client, run pvscan on the appliance
        ssh_kwargs = {
            'username': conf.credentials['ssh']['username'],
            'password': conf.credentials['ssh']['password'],
            'hostname': ip_addr
        }
        client = SSHClient(**ssh_kwargs)
        status, out = client.run_command('pvscan', timeout=5 * 60)

    # remove it
    else:
        vm_dlun = vm.disks.get(name=dlun_name)
        if vm_dlun is None:
            return
        else:
            detach = params.Action(detach=True)
            vm_dlun.delete(action=detach)
def get_current_version_string():
    """Returns string contents of /var/www/miq/vmdb/VERSION"""
    client = SSHClient()
    status, version_text = client.run_command('cat /var/www/miq/vmdb/VERSION')
    # Strip the trailing newline from the file contents.
    return version_text.strip()
def list_orphaned_files_per_host(host_name, host_datastore_urls, provider_key,
                                 vm_registered_files, unregistered_files):
    """Scan a host's datastores over ssh and collect files not owned by a registered VM.

    Appends each orphaned file path to ``unregistered_files`` (mutated in place)
    and prints a tab-separated report line (host, path, type, file count).
    Returns False on any error; returns None on success.
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        # Prefer the configured host name that matches host_name as a substring.
        hostname = [
            host['name'] for host in hosts if host_name in host['name']
        ]
        # check if hostname returned is ipaddress
        if not hostname:
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', host_name)
        connect_kwargs = {
            'username': credentials['host_default']['username'],
            'password': credentials['host_default']['password'],
            'hostname': hostname[0]
        }
        for datastore_url in host_datastore_urls:
            # Strip the "ds://" style scheme prefix from the datastore URL.
            datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
            ssh_client = SSHClient(**connect_kwargs)
            command = 'ls ~/{}'.format(datastore_path[0])
            exit_status, output = ssh_client.run_command(command)
            # NOTE(review): run_command is invoked again after close() below —
            # presumably SSHClient reconnects on demand; confirm before refactoring.
            ssh_client.close()
            files_in_datastore = output.splitlines(
            ) if exit_status == 0 else []
            for fil in files_in_datastore:
                if fil not in vm_registered_files:
                    file_type = 'UNKNOWN'
                    number_of_files = 0
                    # 'test -d' prints 0 when the entry is a directory.
                    command = 'test -d ~/{}/{}; echo $?'.format(
                        datastore_path[0], fil)
                    exit_status, output = ssh_client.run_command(command)
                    ssh_client.close()
                    file_extension = re.findall(r'.*\.(\w*)', fil)
                    if file_extension:
                        file_type = file_extension[0]
                        number_of_files = 1
                    if int(output.strip()) == 0:
                        # Directory: count its entries and classify by the VM
                        # definition files found inside it.
                        command = 'ls ~/{}/{} | wc -l'.format(
                            datastore_path[0], fil)
                        exit_status, output = ssh_client.run_command(command)
                        number_of_files = output.strip()
                        command = 'find ~/{}/{} -name "*.vmx" | wc -l'.format(
                            datastore_path[0], fil)
                        vmx_status, vmx_output = ssh_client.run_command(
                            command)
                        command = 'find ~/{}/{} -name "*.vmtx" | wc -l'.format(
                            datastore_path[0], fil)
                        vmtx_status, vmtx_output = ssh_client.run_command(
                            command)
                        command = 'find ~/{}/{} -name "*.vmdk" | wc -l'.format(
                            datastore_path[0], fil)
                        vmdk_status, vmdk_output = ssh_client.run_command(
                            command)
                        ssh_client.close()
                        if int(vmx_output.strip()) > 0:
                            file_type = 'VirtualMachine'
                        elif int(vmtx_output.strip()) > 0:
                            file_type = 'Template'
                        elif int(vmdk_output.strip()) > 0:
                            file_type = 'VMDK'
                    # Deletion deliberately left disabled; report only.
                    # delete_this = '~/' + datastore_path[0] + fil
                    # command = 'rm -rf {}'.format(delete_this)
                    # exit_status, output = ssh_client.run_command(command)
                    # logger.info(output)
                    file_path = '~/' + datastore_path[0] + fil
                    if file_path not in unregistered_files:
                        unregistered_files.append(file_path)
                        print('{}\t\t{}\t\t{}\t\t{}'.format(
                            hostname[0], file_path, file_type,
                            number_of_files))
    except Exception as e:
        logger.error(e)
        return False
class UiCoveragePlugin(object):
    """pytest plugin that installs simplecov on the appliance, gathers UI (rails)
    code coverage during the test run, then merges, collects, and reports it."""

    def __init__(self):
        self.ssh_client = SSHClient()

    # trylast so that terminalreporter's been configured before ui-coverage
    @pytest.mark.trylast
    def pytest_configure(self, config):
        # Eventually, the setup/teardown work for coverage should be handled by
        # utils.appliance.Appliance to make multi-appliance support easy
        self.reporter = config.pluginmanager.getplugin('terminalreporter')
        self.reporter.write_sep('-', 'Setting up UI coverage reporting')

        self.install_simplecov()
        self.install_coverage_hook()
        self.restart_evm()
        self.touch_all_the_things()
        check_appliance_ui(base_url())

    def pytest_unconfigure(self, config):
        self.reporter.write_sep(
            '-', 'Waiting for coverage to finish and collecting reports')
        self.stop_touching_all_the_things()
        self.merge_reports()
        self.collect_reports()
        self.print_report()

    def install_simplecov(self):
        """Push the Gemfile to the appliance and bundle-install the coverage gems."""
        logger.info('Installing coverage gems on appliance')
        self.ssh_client.put_file(gemfile.strpath, rails_root.strpath)
        x, out = self.ssh_client.run_command(
            'cd {}; bundle'.format(rails_root))
        return x == 0

    def install_coverage_hook(self):
        """Install the coverage hook and require it once from preinitializer.rb."""
        logger.info('Installing coverage hook on appliance')
        # Put the coverage hook in the miq lib path
        self.ssh_client.put_file(
            coverage_hook.strpath,
            rails_root.join('..', 'lib', coverage_hook.basename).strpath)
        replacements = {
            'require': r"require 'coverage_hook'",
            'config': rails_root.join('config').strpath
        }
        # grep/echo to try to add the require line only once
        # This goes in preinitializer after the miq lib path is set up,
        # which makes it so ruby can actually require the hook
        command_template = (
            'cd {config};'
            'grep -q "{require}" preinitializer.rb || echo -e "\\n{require}" >> preinitializer.rb'
        )
        x, out = self.ssh_client.run_command(
            command_template.format(**replacements))
        return x == 0

    def restart_evm(self, rude=True):
        """Restart EVM so the coverage hook is loaded; rude=True kills ruby outright."""
        logger.info('Restarting EVM to enable coverage reporting')
        # This is rude by default (issuing a kill -9 on ruby procs), since the most common
        # use-case will be to set up coverage on a freshly provisioned appliance in a
        # jenkins run
        if rude:
            x, out = self.ssh_client.run_command(
                'killall -9 ruby; service evmserverd start')
        else:
            # BUG FIX: this used to call run_comment(), a nonexistent attribute,
            # so the non-rude path always raised AttributeError.
            x, out = self.ssh_client.run_command('service evmserverd restart')
        return x == 0

    def touch_all_the_things(self):
        """Start the thing toucher asynchronously to establish baseline coverage."""
        logger.info(
            'Establishing baseline coverage by requiring ALL THE THINGS')
        # send over the thing toucher
        self.ssh_client.put_file(
            thing_toucher.strpath,
            rails_root.join(thing_toucher.basename).strpath)
        # start it in an async process so we can go one testing while this takes place
        self._thing_toucher_proc = Process(target=_thing_toucher_mp_handler,
                                           args=[self.ssh_client])
        self._thing_toucher_proc.start()

    def stop_touching_all_the_things(self):
        """Block until the baseline coverage generator finishes; True on success."""
        logger.info('Waiting for baseline coverage generator to finish')
        # block while the thing toucher is still running
        self._thing_toucher_proc.join()
        return self._thing_toucher_proc.exitcode == 0

    def merge_reports(self):
        """Run the merger script on the appliance to combine per-process reports."""
        logger.info("Merging coverage reports on appliance")
        # install the merger script
        self.ssh_client.put_file(
            coverage_merger.strpath,
            rails_root.join(coverage_merger.basename).strpath)
        # don't async this one since it's happening in unconfigure
        # merge/clean up the coverage reports
        x, out = self.ssh_client.run_rails_command('coverage_merger.rb')
        return x == 0

    def collect_reports(self):
        """Copy the remote coverage dir into a fresh local log_path/coverage dir."""
        coverage_dir = log_path.join('coverage')
        # clean out old coverage dir if it exists
        if coverage_dir.check():
            coverage_dir.remove(rec=True, ignore_errors=True)
        # Then ensure the the empty dir exists
        coverage_dir.ensure(dir=True)
        # then copy the remote coverage dir into it
        logger.info("Collecting coverage reports to {}".format(
            coverage_dir.strpath))
        logger.info("Report collection can take several minutes")
        self.ssh_client.get_file(rails_root.join('coverage').strpath,
                                 log_path.strpath,
                                 recursive=True)

    def print_report(self):
        """Print the covered percentage from .last_run.json to the terminal."""
        try:
            last_run = json.load(
                log_path.join('coverage', '.last_run.json').open())
            coverage = last_run['result']['covered_percent']
            # TODO: Make the happy vs. sad coverage color configurable, and set it to
            # something good once we know what good is
            style = {'bold': True}
            if coverage > 40:
                style['green'] = True
            else:
                style['red'] = True
            self.reporter.line('UI Coverage Result: {}%'.format(coverage),
                               **style)
        except KeyboardInterrupt:
            # don't block this, so users can cancel out
            raise
        except Exception:
            # narrowed from a bare except: — still best-effort, but no longer
            # swallows SystemExit/GeneratorExit
            logger.error(
                'Error printing coverage report to terminal, traceback follows'
            )
            logger.error(traceback.format_exc())
def main():
    """Attach (or detach, with --remove) a provider's direct lun disk to a VM."""
    parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--provider', dest='provider_name',
                        help='provider name in cfme_data')
    parser.add_argument('--vm_name', help='the name of the VM on which to act')
    parser.add_argument('--remove', help='remove disk from vm',
                        action="store_true")
    args = parser.parse_args()

    provider = provider_factory(args.provider_name)

    # check that we are working with a rhev provider
    # BUG FIX: all messages below used args.providername, which does not exist
    # (the argparse dest is provider_name) and raised AttributeError.
    if not isinstance(provider, RHEVMSystem):
        sys.exit(args.provider_name + " is not a RHEVM system, exiting...")

    # check that the vm exists on the rhev provider, get the ip address if so
    try:
        vm = provider.api.vms.get(args.vm_name)
        ip_addr = provider.get_ip_address(args.vm_name)
    except Exception:
        sys.exit(args.vm_name + " vm not found on provider " +
                 args.provider_name + ", exiting...")

    # check for direct lun definition on provider's cfme_data.yaml
    if 'direct_lun_name' not in cfme_data['management_systems'][args.provider_name]:
        sys.exit("direct_lun_name key not in cfme_data.yaml under provider " +
                 args.provider_name + ", exiting...")

    # does the direct lun exist
    dlun_name = cfme_data['management_systems'][args.provider_name]['direct_lun_name']
    dlun = provider.api.disks.get(dlun_name)
    if dlun is None:
        sys.exit("Direct lun disk named " + dlun_name +
                 " is not found on provider " + args.provider_name)

    # add it
    if not args.remove:
        # is the disk present and active?
        vm_disk_list = vm.get_disks().list()
        for vm_disk in vm_disk_list:
            if vm_disk.name == dlun_name:
                if vm_disk.active:
                    return
                else:
                    # BUG FIX: was vm_disk.actvate() (typo -> AttributeError)
                    vm_disk.activate()
                    return
        # if not present, add it and activate
        direct_lun = params.Disk(id=dlun.id)
        added_lun = vm.disks.add(direct_lun)
        added_lun.activate()

        # Init SSH client, run pvscan on the appliance so it sees the new LUN
        ssh_kwargs = {
            'username': credentials['ssh']['username'],
            'password': credentials['ssh']['password'],
            'hostname': ip_addr
        }
        client = SSHClient(**ssh_kwargs)
        status, out = client.run_command('pvscan')
    # remove it
    else:
        vm_dlun = vm.disks.get(name=dlun_name)
        if vm_dlun is None:
            return
        else:
            detach = params.Action(detach=True)
            vm_dlun.delete(action=detach)
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event,
                                request):
    """Tests revert snapshot

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name)
    ssh_kwargs = {
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm, it can take a while. Without
    # this wait, the ssh command would fail with 'port 22 not available' error.
    # Easiest way to solve this is just mask the exception with 'handle_exception = True'
    # and wait for successful completition of the ssh command.
    # The 'fail_func' ensures we close the connection that failed with exception.
    # Without this, the connection would hang there and wait_for would fail with timeout.
    # BUG FIX: fail_func must be the callable itself; the original passed
    # ssh_client.close() which closed the connection immediately and handed
    # wait_for the None return value instead of a function.
    wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').rc == 0,
             num_sec=300, delay=20, handle_exception=True,
             fail_func=ssh_client.close)
    snapshot1.create()
    register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                   event_type='vm_snapshot_complete')
    register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                   event_type='vm_snapshot')
    ssh_client.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20,
             fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF,
                                     timeout=720)
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False)
    navigate_to(test_vm.provider, 'Details')
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_ON,
                                     timeout=900)
    current_state = test_vm.find_quadicon().state
    soft_assert(current_state.startswith('currentstate-on'),
                "Quadicon state is {}".format(current_state))
    soft_assert(test_vm.provider.mgmt.is_vm_running(test_vm.name),
                "vm not running")
    # Same fail_func fix as above: pass the bound method, don't call it.
    wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').rc == 0,
             num_sec=400, delay=20, handle_exception=True,
             fail_func=ssh_client.close)
    try:
        # snapshot1.txt should exist (created before snapshot1), snapshot2.txt
        # should be gone after reverting to snapshot1.
        result = ssh_client.run_command('test -e snapshot1.txt')
        assert not result.rc
        result = ssh_client.run_command('test -e snapshot2.txt')
        assert result.rc
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # narrowed from a bare except:
        logger.exception('Revert to snapshot %s Failed', snapshot1.name)
def main():
    """Attach (or detach, with --remove) a provider's direct lun disk to a VM."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--provider', dest='provider_name',
                        help='provider name in cfme_data')
    parser.add_argument('--vm_name', help='the name of the VM on which to act')
    parser.add_argument('--remove', help='remove disk from vm',
                        action="store_true")
    args = parser.parse_args()

    provider = provider_factory(args.provider_name)

    # check that we are working with a rhev provider
    # BUG FIX: messages below used args.providername, which does not exist
    # (argparse dest is provider_name) and raised AttributeError.
    if not isinstance(provider, RHEVMSystem):
        sys.exit(args.provider_name + " is not a RHEVM system, exiting...")

    # check that the vm exists on the rhev provider, get the ip address if so
    try:
        vm = provider.api.vms.get(args.vm_name)
        ip_addr = provider.get_ip_address(args.vm_name)
    except Exception:
        sys.exit(args.vm_name + " vm not found on provider " +
                 args.provider_name + ", exiting...")

    # check for direct lun definition on provider's cfme_data.yaml
    if 'direct_lun_name' not in cfme_data['management_systems'][
            args.provider_name]:
        sys.exit("direct_lun_name key not in cfme_data.yaml under provider " +
                 args.provider_name + ", exiting...")

    # does the direct lun exist
    dlun_name = cfme_data['management_systems'][
        args.provider_name]['direct_lun_name']
    dlun = provider.api.disks.get(dlun_name)
    if dlun is None:
        sys.exit("Direct lun disk named " + dlun_name +
                 " is not found on provider " + args.provider_name)

    # add it
    if not args.remove:
        # is the disk present and active?
        vm_disk_list = vm.get_disks().list()
        for vm_disk in vm_disk_list:
            if vm_disk.name == dlun_name:
                if vm_disk.active:
                    return
                else:
                    # BUG FIX: was vm_disk.actvate() (typo -> AttributeError)
                    vm_disk.activate()
                    return
        # if not present, add it and activate
        direct_lun = params.Disk(id=dlun.id)
        added_lun = vm.disks.add(direct_lun)
        added_lun.activate()

        # Init SSH client, run pvscan on the appliance so it sees the new LUN
        ssh_kwargs = {
            'username': credentials['ssh']['username'],
            'password': credentials['ssh']['password'],
            'hostname': ip_addr
        }
        client = SSHClient(**ssh_kwargs)
        status, out = client.run_command('pvscan')
    # remove it
    else:
        vm_dlun = vm.disks.get(name=dlun_name)
        if vm_dlun is None:
            return
        else:
            detach = params.Action(detach=True)
            vm_dlun.delete(action=detach)
def main():
    """Download and install the NetApp SDK on a target appliance over ssh."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--address',
                        help='hostname or ip address of target appliance',
                        default=parse_if_not_none(env.get("base_url", None)))
    parser.add_argument('--sdk_url',
                        help='url to download sdk pkg',
                        default=cfme_data.get("basic_info", {}).get(
                            "netapp_sdk_url", None))
    parser.add_argument('--restart',
                        help='restart evmserverd after installation ' +
                        '(required for proper operation)',
                        action="store_true")
    args = parser.parse_args()

    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }

    # Init SSH client
    client = SSHClient(**ssh_kwargs)

    # start
    filename = args.sdk_url.split('/')[-1]
    foldername = os.path.splitext(filename)[0]

    # download
    # NOTE: print() with a single argument behaves identically under
    # Python 2 and 3; converted from the py2-only statement form.
    print('Downloading sdk')
    status, out = client.run_command(
        'wget {url} -O {file} > /root/unzip.out 2>&1'.format(
            url=args.sdk_url, file=filename))

    # extract
    print('Extracting sdk ({})'.format(filename))
    status, out = client.run_command(
        'unzip -o -d /var/www/miq/vmdb/lib/ {}'.format(filename))
    if status != 0:
        print(out)
        sys.exit(1)

    # install
    print('Installing sdk ({})'.format(foldername))
    path = "/var/www/miq/vmdb/lib/{}/lib/linux-64".format(foldername)
    # Check if we haven't already added this line
    if client.run_command(
            "grep -F '{}' /etc/default/evm".format(path))[0] != 0:
        status, out = client.run_command(
            'echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:{}" >> /etc/default/evm'
            .format(path))
        if status != 0:
            # BUG FIX: the message claims to show the rc but formatted the
            # command output (which was printed again right after anyway).
            print('SDK installation failure (rc: {})'.format(status))
            print(out)
            sys.exit(1)
    else:
        print("Not needed to install, already done")

    print("Running ldconfig")
    client.run_command("ldconfig")

    print("Modifying YAML configuration")
    yaml = get_yaml_config("vmdb")
    yaml["product"]["storage"] = True
    set_yaml_config("vmdb", yaml)

    # To mark that we installed netapp
    client.run_command("touch /var/www/miq/vmdb/HAS_NETAPP")

    # service evmserverd restart
    if args.restart:
        print('Appliance restart')
        status, out = client.run_command('reboot &')
        time.sleep(30)  # To prevent clobbing with appliance shutting down
        print('evmserverd restarted, the UI should start shortly.')
    else:
        print('evmserverd must be restarted before netapp sdk can be used')
def get_current_version_string(): """Returns string contents of /var/www/miq/vmdb/VERSION""" ssh_client = SSHClient() exit_status, current_version = ssh_client.run_command( 'cat /var/www/miq/vmdb/VERSION') return current_version.strip()
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('address', help='hostname or ip address of target appliance', nargs='?', default=None) parser.add_argument('--with_ssl', help='update for ssl connections', action="store_true") args = parser.parse_args() ssh_kwargs = { 'username': credentials['ssh']['username'], 'password': credentials['ssh']['password'], } if args.address: ssh_kwargs['hostname'] = args.address # Init SSH client client = SSHClient(**ssh_kwargs) # set root password client.run_command("psql -d vmdb_production -c \"alter user " + credentials['database']['username'] + " with password '" + credentials['database']['password'] + "'\"") # back up pg_hba.conf client.run_command('mv /opt/rh/postgresql92/root/var/lib/pgsql/data/pg_hba.conf ' + '/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_hba.conf.sav') # rewrite pg_hba.conf client.run_command("echo 'local all postgres,root trust' > " + "/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_hba.conf") client.run_command("echo 'host all all 0.0.0.0/0 md5' >> " + "/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_hba.conf") if args.with_ssl: client.run_command("echo 'hostssl all all all cert map=sslmap' >> " + "/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_hba.conf") client.run_command("chown postgres:postgres " + "/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_hba.conf") # restart postgres client.run_command("service postgresql92-postgresql restart")