def get_vm_config_modified_time(name, vm_name, datastore_url, provider_key):
    """Return the modification time of a VM's ``.vmx`` file on its host.

    Looks up host credentials for ``provider_key`` in ``cfme_data``, SSHes
    to the host whose name contains ``name`` (falling back to a dotted-quad
    IP parsed out of ``name``), and runs ``date -r`` on the
    ``<vm_name>.vmx`` file found under the datastore path.

    Args:
        name: Host name (or a string containing the host's IP address).
        vm_name: Name of the VM (also the ``.vmx`` basename).
        datastore_url: Datastore URL; its scheme prefix is stripped.
        provider_key: Key of the provider in cfme_data.

    Returns:
        Naive local-timezone ``datetime`` on success, ``False`` on any
        failure (the error is logged).
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        host_creds = providers_data[provider_key].get('host_credentials', 'host_default')
        hostname = [host['name'] for host in hosts if name in host['name']]
        if not hostname:
            # Fall back to any dotted-quad IP embedded in the name.
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', name)
        connect_kwargs = {
            'username': credentials[host_creds]['username'],
            'password': credentials[host_creds]['password'],
            'hostname': hostname[0]
        }
        # Strip the URL scheme prefix from the datastore URL.
        datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
        command = 'find ~/{}/{} -name {} | xargs date -r'.format(
            datastore_path[0], str(vm_name), str(vm_name) + '.vmx')
        # BUG FIX: the original created the client outside any try/finally,
        # leaking the connection whenever run_command raised. The context
        # manager guarantees it is closed on every path.
        with SSHClient(**connect_kwargs) as ssh_client:
            exit_status, output = ssh_client.run_command(command)
        modified_time = parser.parse(output.rstrip())
        modified_time = modified_time.astimezone(
            pytz.timezone(str(get_localzone())))
        return modified_time.replace(tzinfo=None)
    except Exception as e:
        logger.error(e)
        return False
def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Checks the availability of a port from outside using another machine (over SSH)"""
    from utils.ssh import SSHClient
    port = int(port)
    addr = addr or my_ip_address()
    if port not in _ports[addr] or force:
        machine_addr = machine_addr or urlparse.urlparse(store.base_url).hostname
        if ssh_creds:
            ssh = SSHClient(
                hostname=machine_addr,
                username=ssh_creds['username'],
                password=ssh_creds['password']
            )
        else:
            ssh = store.current_appliance.ssh_client
        with ssh:
            # on exception => fails with return code 1
            cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
            ret, out = ssh.run_command(cmd)
            # Cache the probe result per (addr, port).
            _ports[addr][port] = (ret == 0)
    return _ports[addr][port]
def get_vm_config_modified_time(name, vm_name, datastore_url, provider_key):
    """Return the mtime of ``<vm_name>.vmx`` on the host serving the VM.

    SSHes to the host of ``provider_key`` that matches ``name`` (or an IP
    extracted from ``name``) and reads the file's timestamp via ``date -r``.

    Returns:
        Naive local-timezone ``datetime`` on success, ``False`` on failure
        (the exception is logged).
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        host_creds = providers_data[provider_key].get('host_credentials', 'host_default')
        hostname = [host['name'] for host in hosts if name in host['name']]
        if not hostname:
            # Fall back to a dotted-quad IP embedded in the name.
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', name)
        connect_kwargs = {
            'username': credentials[host_creds]['username'],
            'password': credentials[host_creds]['password'],
            'hostname': hostname[0]
        }
        # Strip the URL scheme prefix from the datastore URL.
        datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
        command = 'find ~/{}/{} -name {} | xargs date -r'.format(
            datastore_path[0], str(vm_name), str(vm_name) + '.vmx')
        # BUG FIX: close the SSH connection even when run_command raises;
        # the original only closed it on the success path.
        with SSHClient(**connect_kwargs) as ssh_client:
            exit_status, output = ssh_client.run_command(command)
        modified_time = parser.parse(output.rstrip())
        modified_time = modified_time.astimezone(pytz.timezone(str(get_localzone())))
        return modified_time.replace(tzinfo=None)
    except Exception as e:
        logger.error(e)
        return False
def get_appliance(provider):
    """Fixture to provision appliance to the provider being tested if necessary"""
    global appliance_list
    global appliance_vm_name
    if provider not in appliance_list:
        basic_info = cfme_data["basic_info"]
        if "appliances_provider" in basic_info and provider == basic_info["appliances_provider"]:
            # Reuse the appliance already running at base_url.
            appliance_list[provider] = re.findall(
                r"[0-9]+(?:\.[0-9]+){3}", conf.env["base_url"])[0]
        else:
            appliance_list[provider] = provision_appliance(provider)
        prov_data = cfme_data["management_systems"][provider]
        if prov_data["type"] == "virtualcenter":
            # ssh in and see if vddk already present, if not, install
            client = SSHClient(
                username=conf.credentials["ssh"]["username"],
                password=conf.credentials["ssh"]["password"],
                hostname=appliance_list[provider],
            )
            if int(client.run_command("ldconfig -p | grep vix | wc -l")[1]) < 1:
                install_vddk(appliance_list[provider])
            client.close()
        elif prov_data["type"] == "rhevm":
            add_rhev_direct_lun_disk(provider, appliance_vm_name)
    return appliance_list[provider]
def get_appliance(provider):
    '''Fixture to provision appliance to the provider being tested if necessary'''
    global appliance_list
    global appliance_vm_name
    if provider not in appliance_list:
        if ('appliances_provider' in cfme_data['basic_info']
                and provider == cfme_data['basic_info']['appliances_provider']):
            # Reuse the appliance already referenced by base_url.
            appliance_list[provider] = re.findall(
                r'[0-9]+(?:\.[0-9]+){3}', conf.env['base_url'])[0]
        else:
            appliance_list[provider] = provision_appliance(provider)
        prov_data = cfme_data['management_systems'][provider]
        if prov_data['type'] == 'virtualcenter':
            # ssh in and see if vddk already present, if not, install
            client = SSHClient(username=conf.credentials['ssh']['username'],
                               password=conf.credentials['ssh']['password'],
                               hostname=appliance_list[provider])
            vix_count = int(client.run_command("ldconfig -p | grep vix | wc -l")[1])
            if vix_count < 1:
                install_vddk(appliance_list[provider])
            client.close()
        elif prov_data['type'] == 'rhevm':
            add_rhev_direct_lun_disk(provider, appliance_vm_name)
    return appliance_list[provider]
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    ipaserver = data["ipaserver"]
    if ipaserver not in get_ntp_servers():
        # Point NTP at the IPA server and give it time to sync.
        set_ntp_servers(ipaserver)
        sleep(120)
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
    auth.setup()
    logout()
    data.update(**credentials.get(data.pop("credentials"), {}))
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data)
    )
    assert rc == 0, out
    assert "failed" not in out.lower(), "External auth setup failed:\n{}".format(out)
    login_admin()
def get_appliance(provider):
    '''Fixture to provision appliance to the provider being tested if necessary'''
    global appliance_list
    global appliance_vm_name
    if provider not in appliance_list:
        info = cfme_data['basic_info']
        if 'appliances_provider' in info.keys() and provider == info['appliances_provider']:
            # base_url already points at an appliance on this provider.
            appliance_list[provider] = re.findall(r'[0-9]+(?:\.[0-9]+){3}',
                                                  conf.env['base_url'])[0]
        else:
            appliance_list[provider] = provision_appliance(provider)
        prov_data = cfme_data['management_systems'][provider]
        if prov_data['type'] == 'virtualcenter':
            # ssh in and see if vddk already present, if not, install
            ssh_kwargs = {
                'username': conf.credentials['ssh']['username'],
                'password': conf.credentials['ssh']['password'],
                'hostname': appliance_list[provider]
            }
            # Init SSH client
            client = SSHClient(**ssh_kwargs)
            if int(client.run_command("ldconfig -p | grep vix | wc -l")[1]) < 1:
                install_vddk(appliance_list[provider])
            client.close()
        elif prov_data['type'] == 'rhevm':
            add_rhev_direct_lun_disk(provider, appliance_vm_name)
    return appliance_list[provider]
def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Checks the availability of a port from outside using another machine (over SSH)"""
    from utils.ssh import SSHClient
    port = int(port)
    addr = addr or my_ip_address()
    if port not in _ports[addr] or force:
        machine_addr = machine_addr or urlparse.urlparse(store.base_url).hostname
        if ssh_creds:
            ssh = SSHClient(hostname=machine_addr,
                            username=ssh_creds['username'],
                            password=ssh_creds['password'])
        else:
            ssh = SSHClient(hostname=machine_addr)
        with ssh:
            # on exception => fails with return code 1
            cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
            ret, out = ssh.run_command(cmd)
            # Cache the probe result per (addr, port).
            _ports[addr][port] = (ret == 0)
    return _ports[addr][port]
def setSSHConnection(self):
    """Open an SSH connection to ``self.server`` and store it on the instance.

    The session is logged to ``<output_dir>/cumulus_<userid>.ssh.log``.
    """
    log_path = "{}/cumulus_{}.ssh.log".format(self.output_dir, self.userid)
    self.ssh_conn = SSHClient(log_path=log_path)
    self.ssh_conn.connect(hostname=self.server,
                          username=self.userid,
                          password=self.password)
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    if data["ipaserver"] not in get_ntp_servers():
        # Sync NTP with the IPA server, then wait for the clock to settle.
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    ExternalAuthSetting(get_groups=data.pop("get_groups", False)).setup()
    logout()
    creds = credentials.get(data.pop("credentials"), {})
    data.update(**creds)
    command = ("appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
               "--ipaprincipal {principal} --ipapassword {password}".format(**data))
    rc, out = ssh.run_command(command)
    assert rc == 0, out
    assert "failed" not in out.lower(), "External auth setup failed:\n{}".format(out)
    login_admin()
def main(): parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("address", help="hostname or ip address of target appliance") parser.add_argument("sdk_url", help="url to download sdk pkg") parser.add_argument( "--restart", help="restart evmserverd after installation " + "(required for proper operation)", action="store_true", ) args = parser.parse_args() ssh_kwargs = { "username": credentials["ssh"]["username"], "password": credentials["ssh"]["password"], "hostname": args.address, } # Init SSH client client = SSHClient(**ssh_kwargs) # start filename = args.sdk_url.split("/")[-1] foldername = os.path.splitext(filename)[0] # download print "Downloading sdk" status, out = client.run_command( "curl %(url)s -o %(file)s > /root/unzip.out 2>&1" % {"url": args.sdk_url, "file": filename} ) # extract print "Extracting sdk (" + filename + ")" status, out = client.run_command("unzip -o -f -d /var/www/miq/vmdb/lib/ %s" % filename) if status != 0: print out sys.exit(1) # install print "Installing sdk (" + foldername + ")" status, out = client.run_command( 'echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:' + "/var/www/miq/vmdb/lib/" + foldername + '/lib/linux-64" >> /etc/default/evm' ) if status != 0: print "SDK installation failure (rc:" + out + ")" print out sys.exit(1) # service evmserverd restart if args.restart: print "Appliance restart" status, out = client.run_command("service evmserverd restart") print "evmserverd restarted, the UI should start shortly." else: print "evmserverd must be restarted before netapp sdk can be used"
def setup_collectd(perf_data):
    """Prepare the ansible inventory/ssh-config for a workload appliance and
    run the post-deploy playbook.

    NOTE(review): mutates ``perf_data`` in place and reads/writes files
    relative to the current working directory — confirm callers expect both.
    """
    # Block until the appliance answers ping before doing anything else.
    command_str = "until ping -c1 " + str(perf_data['appliance']['ip_address']) + " &>/dev/null; do sleep 5; done"
    print subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE).stdout.read()
    # Derive the public key from the private key and install it as the
    # appliance's authorized key (enables passwordless ansible access).
    id_pub = subprocess.Popen("ssh-keygen -y -f ~/.ssh/id_rsa_t", shell=True,
                              stdout=subprocess.PIPE).stdout.read()
    commandstring = "echo \"" + str(id_pub) + "\" > ~/.ssh/authorized_keys"
    ssh_client = SSHClient()
    ssh_client.run_command(commandstring)
    # Substitute the running CFME version (dots stripped) into the name.
    version_string = get_current_version_string().replace(".", "")
    appliance_name_update = perf_data['appliance']['appliance_name'].replace(
        "LATEST", version_string)
    perf_data['appliance']['appliance_name'] = appliance_name_update
    # Pull grafana settings from the local data.yml.
    stream = open("cfme-performance/conf/data.yml", "r")
    datayml = yaml.load(stream)
    perf_data['tools']['grafana']['ip_address'] = datayml['grafana']['ip']
    perf_data['tools']['grafana']['enabled'] = 'true'
    # Build the ansible hosts.local inventory.
    hosts_local = "[monitorhost]\n" + str(
        perf_data['tools']['grafana']['ip_address']) + "\n\n"
    hosts_local = hosts_local + "[cfme-vmdb]\n" + perf_data['appliance'][
        'appliance_name'] + "\n\n"
    # NOTE(review): "[cfme-worker]" appears twice; possibly one of them was
    # meant to be a different section — verify against the playbook.
    hosts_local = hosts_local + "[cfme-worker]\n\n[cfme-worker]\n\n[cfme-all-in-one]\n\n[rhevm]\n"
    hostfile = open("ansible/hosts.local", "w")
    hostfile.write(hosts_local)
    hostfile.close()
    # Build the per-host ssh-config fragment used by ansible.
    cstr = "\n\tIdentityFile ~/.ssh/id_rsa_t\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null"
    ssh_config = "Host " + perf_data['appliance'][
        'appliance_name'] + "\n\tHostname " + perf_data['appliance'][
        'ip_address'] + cstr
    ssh_config = ssh_config + "\nHost " + datayml['grafana'][
        'host'] + "\n\tHostname " + datayml['grafana']['ip'] + cstr
    #print ssh_config
    sshfile = open('ansible/ssh-config.local', 'w')
    sshfile.write(ssh_config)
    sshfile.close()
    # Rename the template appliance entry in all.yml to the real name.
    stream = open("cfme-performance/conf/all.yml", "r")
    allstream = yaml.load(stream)
    allstream['appliances'][perf_data['appliance']['appliance_name']] = {}
    allstream['appliances'][perf_data['appliance'][
        'appliance_name']] = allstream['appliances']['CF-B2B-R0000-test']
    del allstream['appliances']['CF-B2B-R0000-test']
    with open('ansible/group_vars/all.local.yml', 'w') as outfile:
        yaml.dump(allstream, outfile, default_flow_style=False)
    # NOTE(review): Popen does not wait — this 300s sleep runs concurrently
    # with the playbook below; confirm a blocking wait was not intended.
    subprocess.Popen("sleep 300", shell=True)
    print subprocess.Popen(
        "ansible-playbook -i hosts.local configure/postdeploy.yml -vvv",
        shell=True, stdout=subprocess.PIPE, cwd="ansible").stdout.read()
def disable_external_auth_ipa():
    """Unconfigure external auth."""
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    # Switch authentication back to the internal database.
    DatabaseAuthSetting().update()
    rc, out = ssh.run_command("appliance_console_cli --uninstall-ipa")
    assert rc == 0, out
def disable_external_auth_ipa():
    """Unconfigure external (IPA) auth and wait for the web UI to return."""
    ssh = SSHClient()
    ensure_browser_open()
    login_admin()
    auth = DatabaseAuthSetting()
    auth.update()
    # BUG FIX: run_command returns an (rc, out) tuple, which is always
    # truthy, so ``assert ssh.run_command(...)`` could never fail. Unpack
    # and assert on the return code (matching the other auth helpers).
    rc, out = ssh.run_command("appliance_console_cli --uninstall-ipa")
    assert rc == 0, out
    appliance.IPAppliance().wait_for_web_ui()
    logout()
def use_storage(uses_ssh):
    """Ensure the NetApp library is installed on the appliance.

    No-op if it is already present; skips on streams other than 5.2; fails
    the test if the installation does not take effect.
    """
    ssh_client = SSHClient()
    if ssh_client.appliance_has_netapp():
        return
    if not current_version().is_in_series("5.2"):
        pytest.skip("Storage tests run only on .2 so far")
    # Use argument lists instead of shell=True strings: no shell features
    # are needed, and this avoids quoting/injection surprises.
    subprocess.call(["python", "./scripts/install_netapp_lib.py", "--restart"])
    subprocess.call(["python", "./scripts/wait_for_appliance_ui.py"])
    if not ssh_client.appliance_has_netapp():
        pytest.fail("Could not setup the netapp for storage testing")
def set_default_domain():
    """Re-enable the 'Default' automate domain when it exists and is disabled."""
    if current_version() < "5.3":
        return  # Domains are not in 5.2.x and lower
    ssh_client = SSHClient()
    # The command ignores the case when the Default domain is not present (: true)
    result = ssh_client.run_rails_command(
        "\"d = MiqAeDomain.where :name => 'Default'; puts (d) ? d.first.enabled : true\"")
    if result.output.lower().strip() == "true":
        return
    # Re-enable the domain
    ssh_client.run_rails_command(
        "\"d = MiqAeDomain.where :name => 'Default'; d = d.first; d.enabled = true; d.save!\"")
def set_yaml_config(config_name, data_dict, hostname=None):
    """Given a yaml name, dictionary and hostname, set the configuration yaml on the server

    The configuration yamls must be inserted into the DB using the ruby console, so this
    function uses SSH, not the database. It makes sense to be included here as a counterpart
    to :py:func:`get_yaml_config`

    Args:
        config_name: Name of the yaml configuration file
        data_dict: Dictionary with data to set/change
        hostname: Hostname/address of the server that we want to set up (default ``None``)

    Note:
        If hostname is set to ``None``, the default server set up for this session will be
        used. See :py:class:``utils.ssh.SSHClient`` for details of the default setup.

    Warning:
        Manually editing the config yamls is potentially dangerous. Furthermore, the rails
        runner doesn't return useful information on the outcome of the set request, so
        errors that arise from the newly loading config file will go unreported.

    Usage:
        # Update the appliance name, for example
        vmbd_yaml = get_yaml_config('vmdb')
        vmdb_yaml['server']['name'] = 'EVM IS AWESOME'
        set_yaml_config('vmdb', vmdb_yaml, '1.2.3.4')
    """
    # CFME does a lot of things when loading a configfile, so
    # let their native conf loader handle the job.
    # Connect to the requested server, or the session default when no
    # hostname was given.
    if hostname is None:
        _ssh_client = SSHClient()
    else:
        _ssh_client = SSHClient(hostname=hostname)
    # Ship the new configuration to the appliance as a temp yaml file.
    dest_yaml = '/tmp/conf.yaml'
    temp_yaml = NamedTemporaryFile()
    yaml.dump(data_dict, temp_yaml, default_flow_style=False)
    _ssh_client.put_file(temp_yaml.name, dest_yaml)
    # Render the ruby loader script with this config's name/path, ship it too.
    dest_ruby = '/tmp/load_conf.rb'
    ruby_template = data_path.join('utils', 'cfmedb_load_config.rbt')
    temp_ruby = load_data_file(ruby_template.strpath,
                               {'config_name': config_name, 'config_file': dest_yaml})
    _ssh_client.put_file(temp_ruby.name, dest_ruby)
    # Run it
    _ssh_client.run_rails_command(dest_ruby)
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request):
    """Tests revert snapshot

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name)
    # SSH credentials come from the template the VM was provisioned from.
    ssh_kwargs = {
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
        'hostname': ip
    }
    # Drop a marker file, snapshot, drop a second marker, snapshot again,
    # then revert to the first snapshot (taken before snapshot2.txt existed).
    with SSHClient(**ssh_kwargs) as ssh_client:
        ssh_client.run_command('touch snapshot1.txt')
        snapshot1.create()
        ssh_client.run_command('touch snapshot2.txt')
        snapshot2 = new_snapshot(test_vm)
        snapshot2.create()
        snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20,
             fail_func=sel.refresh)
    # Revert powers the VM off; wait for that, then power it back on via CFME.
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=720)
    register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                   event_type='request_vm_start')
    register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                   event_type='vm_start')
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False)
    navigate_to(test_vm.provider, 'Details')
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm not running")
    with SSHClient(**ssh_kwargs) as ssh_client:
        try:
            # NOTE(review): run_command returns (rc, out); index [1] is the
            # *output* string, which is compared to 0 — this looks like it
            # should be index [0] (the exit status). Verify against
            # SSHClient's return type.
            wait_for(lambda: ssh_client.run_command('test -e snapshot2.txt')[1] == 0,
                     fail_condition=False)
            logger.info('Revert to snapshot %s successful', snapshot1.name)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit — consider narrowing to the wait_for timeout error.
            logger.info('Revert to snapshot %s Failed', snapshot1.name)
def test_basic_metrics(provider):
    """ Basic Metrics availability test
        This test checks that the Metrics service is up
        Curls the hawkular status page and checks if it's up """
    token = provider.credentials['token']
    username, password = token.principal, token.secret
    hostname = conf.cfme_data.get('management_systems', {})[provider.key]\
        .get('hostname', [])
    command = 'curl -X GET https://{}/hawkular/metrics/ --insecure'.format(hostname)
    ssh_client = SSHClient(hostname=hostname, username=username, password=password)
    # The status page should identify the service by name.
    response = str(ssh_client.run_command(command))
    assert re.search("Hawkular[ -]Metrics", response)
def get_worker_pid(worker_type):
    """Obtains the pid of the first worker with the worker_type specified"""
    ssh_client = SSHClient()
    command = ('service evmserverd status 2> /dev/null | grep -m 1 '
               '\'{}\' | awk \'{{print $7}}\''.format(worker_type))
    exit_status, out = ssh_client.run_command(command)
    worker_pid = str(out).strip()
    if not out:
        logger.error('Could not obtain {} PID, check evmserverd running or if specific role is'
                     ' enabled...'.format(worker_type))
    else:
        logger.info('Obtained {} PID: {}'.format(worker_type, worker_pid))
    assert out
    return worker_pid
def disable_external_auth_openldap():
    """Revert to database auth and remove the OpenLDAP config artifacts."""
    auth = DatabaseAuthSetting()
    auth.update()
    sssd_conf = '/etc/sssd/sssd.conf'
    httpd_auth = '/etc/pam.d/httpd-auth'
    manageiq_remoteuser = '******'
    manageiq_ext_auth = '/etc/httpd/conf.d/manageiq-external-auth.conf'
    command = 'rm -rf {} && rm -rf {} && rm -rf {} && rm -rf {}'.format(
        sssd_conf, httpd_auth, manageiq_ext_auth, manageiq_remoteuser)
    ssh = SSHClient()
    # BUG FIX: run_command returns an (rc, out) tuple which is always
    # truthy, so the original ``assert ssh.run_command(command)`` could
    # never fail. Assert on the actual return code.
    rc, out = ssh.run_command(command)
    assert rc == 0, out
    ssh.run_command('systemctl restart evmserverd')
    appliance.IPAppliance().wait_for_web_ui()
    logout()
def setup_external_auth_openldap(**data):
    """Sets up the appliance for an external authentication with OpenLdap.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaddress'],
    }
    appliance_obj = appliance.IPAppliance()
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = appliance_obj.address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['domain_name'])
    # Use the client as a context manager so the connection is closed even
    # if a command or file transfer raises (the original leaked it then).
    with SSHClient(**connect_kwargs) as ldapserver_ssh:
        # updating the /etc/hosts is a workaround due to the
        # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
        command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
        ldapserver_ssh.run_command(command)
        ldapserver_ssh.get_file(remote_file=data['cert_filepath'],
                                local_path=conf_path.strpath)
    ensure_browser_open()
    login_admin()
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", True))
    auth.setup()
    appliance_obj.configure_appliance_for_openldap_ext_auth(appliance_fqdn)
    logout()
def main():
    """Collect the installed-rpm list and compressed logs from an appliance."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    # Init SSH client
    client = SSHClient(username=credentials['ssh']['username'],
                       password=credentials['ssh']['password'],
                       hostname=args.address)

    # generate installed rpm list
    client.run_command('rpm -qa | sort > /tmp/installed_rpms.txt')
    client.get_file('/tmp/installed_rpms.txt', 'installed_rpms.txt')

    # compress logs dir
    client.run_command('cd /var/www/miq/vmdb; tar zcvf /tmp/appliance_logs.tgz log')
    client.get_file('/tmp/appliance_logs.tgz', 'appliance_logs.tgz')
def test_idle_default(request):
    """Runs an appliance at idle for specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with Default Roles',
        'appliance_roles': get_server_roles_workload_idle_default(separator=', '),
        'scenario': {
            'name': 'default'
        }
    }
    # The memory monitor runs on its own SSH connection for the whole test.
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        # Finalizer: stop the monitor thread and attach the grafana URL and
        # quantifiers to the scenario results.
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    # No need to set server roles as we are using the default set of roles
    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)
    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
def get_worker_pid(worker_type):
    """Obtains the pid of the first worker with the worker_type specified"""
    exit_status, out = SSHClient().run_command(
        'service evmserverd status 2> /dev/null | grep -m 1 '
        '\'{}\' | awk \'{{print $7}}\''.format(worker_type))
    worker_pid = str(out).strip()
    if out:
        logger.info('Obtained {} PID: {}'.format(worker_type, worker_pid))
    else:
        logger.error('Could not obtain {} PID, check evmserverd running or if specific role is'
                     ' enabled...'.format(worker_type))
    assert out
    return worker_pid
def main():
    """Export an automate domain from an appliance and re-import it under a
    new name, stripping the read-only ("system") flag in between."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('source', nargs='?', default='ManageIQ',
                        help='Source Domain name')
    parser.add_argument('dest', nargs='?', default='Default',
                        help='Destination Domain name')
    parser.add_argument('username', nargs='?', default=credentials['ssh']['username'],
                        help='SSH username for target appliance')
    parser.add_argument('password', nargs='?', default=credentials['ssh']['password'],
                        help='SSH password for target appliance')
    args = parser.parse_args()

    ssh_kwargs = {'username': args.username, 'password': args.password}
    if args.hostname is not None:
        ssh_kwargs['hostname'] = args.hostname

    client = SSHClient(stream_output=True, **ssh_kwargs)

    # Make sure the working dir exists
    client.run_command('mkdir -p /tmp/miq')

    print 'Exporting domain...'
    export_opts = 'DOMAIN={} EXPORT_DIR=/tmp/miq PREVIEW=false OVERWRITE=true'.format(
        args.source)
    export_cmd = 'evm:automate:export {}'.format(export_opts)
    print export_cmd
    client.run_rake_command(export_cmd)

    # Flip the read-only marker so the copy can be imported as editable.
    # NOTE(review): the path hard-codes "ManageIQ" while the export used
    # args.source — confirm this works when a non-default source is given.
    ro_fix_cmd = "sed -i 's/system: true/system: false/g' /tmp/miq/ManageIQ/__domain__.yaml"
    client.run_command(ro_fix_cmd)

    import_opts = 'DOMAIN={} IMPORT_DIR=/tmp/miq PREVIEW=false'.format(
        args.source)
    import_opts += ' OVERWRITE=true IMPORT_AS={}'.format(args.dest)
    import_cmd = 'evm:automate:import {}'.format(import_opts)
    print import_cmd
    client.run_rake_command(import_cmd)
def db_restore(temp_appliance_extended_db):
    """Fetch the bottlenecks DB backup from storage and restore it on the appliance."""
    app = temp_appliance_extended_db
    app.stop_evm_service()
    app.drop_database()
    db_storage_hostname = conf.cfme_data['bottlenecks']['hostname']
    db_storage = SSHClient(hostname=db_storage_hostname, **conf.credentials['bottlenecks'])
    with db_storage as ssh:
        # Different files for different versions
        ver = "_56" if current_version() < '5.7' else ""

        def _fetch(remote_name, local_prefix):
            # Pull one backup artifact down to a uniquely-named local file.
            local = "/tmp/{}_{}".format(local_prefix, fauxfactory.gen_alphanumeric())
            ssh.get_file(
                "/home/backups/otsuman_db_bottlenecks/{}{}".format(remote_name, ver),
                local)
            return local

        rand_filename = _fetch("v2_key", "v2_key")
        dump_filename = _fetch("db.backup", "db_dump")
        region_filename = _fetch("REGION", "REGION")
        guid_filename = _fetch("GUID", "GUID")

    with app.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")
        ssh.put_file(region_filename, "/var/www/miq/vmdb/REGION")
        ssh.put_file(guid_filename, "/var/www/miq/vmdb/GUID")

    app.restore_database()
    app.start_evm_service()
    app.wait_for_web_ui()
def sat6_unregister():
    """Unregister the appliance from Satellite 6 and clean up the rhsm state."""
    commands = (
        'subscription-manager remove --all',
        'subscription-manager unregister',
        'subscription-manager clean',
        'mv -f /etc/rhsm/rhsm.conf.kat-backup /etc/rhsm/rhsm.conf',
        'rpm -qa | grep katello-ca-consumer | xargs rpm -e',
    )
    with SSHClient() as ssh:
        for command in commands:
            ssh.run_command(command)
def __init__(self, provider):
    """Resolve SSH credentials from the provider's cfme data and open a client.

    Raises:
        Exception: if the provider's cfme data has no ``ssh_creds`` entry.
    """
    provider_cfme_data = provider.get_yaml_data()
    self.hostname = provider_cfme_data['hostname']
    creds = provider_cfme_data.get('ssh_creds')
    if not creds:
        raise Exception(
            'Could not find ssh_creds in provider\'s cfme data.')
    if isinstance(creds, dict):
        # Inline credentials in the provider entry.
        self.username = creds.get('username')
        self.password = creds.get('password')
    else:
        # A key into the shared credentials store.
        self.username = credentials[creds].get('username')
        self.password = credentials[creds].get('password')
    # BUG FIX: the original wrapped construction in a ``with`` block and
    # stored the client on ``self`` — the context manager closed the
    # connection on exit, leaving ``self.ssh_client`` unusable. Keep the
    # client open for the lifetime of this object instead.
    self.ssh_client = SSHClient(hostname=self.hostname,
                                username=self.username,
                                password=self.password,
                                look_for_keys=True)
    self.ssh_client.load_system_host_keys()
    self._command_counter = 0
    self.log_line_limit = 500
def make_ssh_client(ip, sshname, sshpass):
    """Return an SSHClient connected to ``ip`` with the given credentials."""
    return SSHClient(hostname=ip, username=sshname, password=sshpass)
def test_navigate_explorer():
    """Initial Example Workload, Add Provider, turn on some things, initiate navigations
    on the WebUI from python. Currently lets disable cleaning the appliance etc."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    # clean_appliance(ssh_client)

    appliance_conf = cfme_performance['appliance']
    cfme_ip = appliance_conf['ip_address']
    cfme_web_ui_user = appliance_conf['web_ui']['username']
    cfme_web_ui_password = appliance_conf['web_ui']['password']
    url = "https://{}/".format(cfme_ip)
    params = {"user_name": cfme_web_ui_user,
              "user_password": cfme_web_ui_password}
    with requests.Session() as sess:
        # Authenticate against both the API and the dashboard endpoints.
        r = sess.get("{}{}".format(url, "api/auth?requester_type=ui"),
                     auth=HTTPBasicAuth(cfme_web_ui_user, cfme_web_ui_password),
                     verify=False, allow_redirects=False)
        r = sess.post("{}{}".format(url, "dashboard/authenticate"),
                      params=params, verify=False, allow_redirects=False)
        dump_r(r)
        # Get a protected page now:
        # r = sess.get(url + 'dashboard/show', verify=False, allow_redirects=False)
        for _ in range(10):
            r = sess.get("{}{}".format(url, "dashboard/show"), verify=False)
            r = sess.get("{}{}".format(url, "vm_infra/explorer"), verify=False)
def main():
    """Sync the target appliance's clock against the configured NTP servers."""
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()

    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address,
    }
    with SSHClient(**ssh_kwargs) as ssh:
        print("Setting appliance's time. Please wait.")
        servers_str = " ".join("'%s'" % server for server in cfme_data["clock_servers"])
        status, out = ssh.run_command("ntpdate " + servers_str)
        if status != 0:
            print(
                "Could not set the time. Check the output of the command, please:"
            )
            print(out.strip())
            return 1
        print(
            "Time was set. Now it should be safe to log in and test on the appliance."
        )
        return 0
def make_ssh_client(ssh_host, ssh_user, ssh_pass):
    """Build an SSHClient for ``ssh_host`` using the supplied credentials."""
    return SSHClient(hostname=ssh_host, username=ssh_user, password=ssh_pass)
def test_idle(request, scenario):
    """Runs an appliance at idle with specific roles turned on for specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with {} Roles'.format(scenario['name']),
        'appliance_roles': ', '.join(scenario['roles']),
        'scenario': scenario
    }
    # The memory monitor runs on its own SSH connection for the whole test.
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        # Finalizer: stop the monitor thread and attach the grafana URLs and
        # quantifiers to the scenario results.
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    # Unlike test_idle_default, this scenario configures its own role set.
    set_server_roles(ssh_client, scenario['roles'])
    s_time = scenario['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)
    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request):
    """Tests revert snapshot

    Creates a marker file, snapshots, creates a second marker file, snapshots
    again, then reverts to the first snapshot and verifies the second marker
    file is gone.

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider_crud.get_mgmt_system().get_ip_address(snapshot1.vm.name)
    # py2/py3-compatible form of the original py2 `print ip` statement
    print(ip)
    ssh_kwargs = {
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh = SSHClient(**ssh_kwargs)
    ssh.run_command('touch snapshot1.txt')
    snapshot1.create()
    # snapshot2.txt exists only after snapshot1 was taken, so it must vanish on revert.
    ssh.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout=720)
    register_event(test_vm.provider_crud.get_yaml_data()['type'], "vm", test_vm.name,
                   ["vm_power_on_req", "vm_power_on"])
    test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False)
    pytest.sel.force_navigate('infrastructure_provider',
                              context={'provider': test_vm.provider_crud})
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
                "vm not running")
    client = SSHClient(**ssh_kwargs)
    request.addfinalizer(test_vm.delete_from_provider)
    try:
        # BUG FIX: run_command returns (exit_status, output); the original indexed
        # [1] (the output string) and compared it to 0, which can never be true.
        # A successful revert means snapshot2.txt no longer exists, i.e. the
        # `test -e` exit status is nonzero.
        wait_for(lambda: client.run_command('test -e snapshot2.txt')[0] != 0,
                 fail_condition=False)
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # was a bare `except:` which would also swallow KeyboardInterrupt/SystemExit
        logger.info('Revert to snapshot %s Failed', snapshot1.name)
def set_yaml_config(config_name, data_dict, hostname=None):
    """Push a configuration yaml to the server via the rails runner.

    Configuration yamls must be inserted into the DB from the ruby console,
    so this goes over SSH rather than the database; it is the counterpart to
    :py:func:`get_yaml_config`.

    Args:
        config_name: Name of the yaml configuration file
        data_dict: Dictionary with data to set/change
        hostname: Hostname/address of the server that we want to set up
            (default ``None``)

    Note:
        If hostname is set to ``None``, the default server set up for this
        session will be used. See :py:class:``utils.ssh.SSHClient`` for
        details of the default setup.

    Warning:
        Manually editing the config yamls is potentially dangerous, and the
        rails runner reports nothing useful about the outcome, so errors from
        the newly loaded config go unreported.

    Usage:

        # Update the appliance name, for example
        vmbd_yaml = get_yaml_config('vmdb')
        vmdb_yaml['server']['name'] = 'EVM IS AWESOME'
        set_yaml_config('vmdb', vmdb_yaml, '1.2.3.4')
    """
    # CFME does a lot of work when loading a configfile, so let its native
    # conf loader do the job: connect to the requested (or default) server.
    _ssh_client = SSHClient(hostname=hostname) if hostname is not None else SSHClient()

    # Dump the new config to a temp file and ship it over.
    temp_yaml = NamedTemporaryFile()
    dest_yaml = '/tmp/conf.yaml'
    yaml.dump(data_dict, temp_yaml, default_flow_style=False)
    _ssh_client.put_file(temp_yaml.name, dest_yaml)

    # Render the ruby loader script from its template and ship it as well.
    dest_ruby = '/tmp/load_conf.rb'
    ruby_template = data_path.join('utils', 'cfmedb_load_config.rbt')
    ruby_replacements = {
        'config_name': config_name,
        'config_file': dest_yaml
    }
    temp_ruby = load_data_file(ruby_template.strpath, ruby_replacements)
    _ssh_client.put_file(temp_ruby.name, dest_ruby)

    # Execute the loader on the appliance.
    _ssh_client.run_rails_command(dest_ruby)
def main():
    """Install and start the SNMP trap listener on an appliance, then verify it answers."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', help='hostname or ip address of target appliance')
    args = arg_parser.parse_args()
    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address,
    }
    # Init SSH client
    with SSHClient(**ssh_kwargs) as ssh_client:
        snmp_path = scripts_data_path.join("snmp")
        # Ship the listener scripts to the appliance.
        print("Copying files")
        ssh_client.put_file(snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
        ssh_client.put_file(snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")
        # Make the listener start on boot (idempotent: only append if absent).
        print("Enabling after startup")
        if ssh_client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] != 0:
            ssh_client.run_command(
                "echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
        assert ssh_client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local")[0] == 0, \
            "Could not enable!"
        # Start it now.
        print("Starting listener")
        assert ssh_client.run_command("cd /root/ && ./snmp_listen.sh start")[0] == 0, \
            "Could not start!"
        # Open the listener port if the firewall rule is missing.
        print("Opening the port in iptables")
        if ssh_client.run_command("grep '--dport 8765' /etc/sysconfig/iptables")[0] != 0:
            # append after the 5432 entry
            ssh_client.run_command(
                "sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
                "/etc/sysconfig/iptables")
            ssh_client.run_command("systemctl restart iptables")  # last ssh command, close
    # Check if accessible
    try:
        requests.get("http://{}:8765/".format(args.address))
    except requests.exceptions.ConnectionError:
        print("Could not detect running listener!")
        exit(2)
def main():
    """Point an appliance at an updates repo, run yum update, optionally reboot."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', help='hostname or ip address of target appliance')
    arg_parser.add_argument('repo_url', help='updates base url')
    arg_parser.add_argument('--reboot', action="store_true",
                            help='reboot after installation (required for proper operation)')
    args = arg_parser.parse_args()

    # Init SSH client
    client = SSHClient(username=credentials['ssh']['username'],
                       password=credentials['ssh']['password'],
                       hostname=args.address)

    # Assemble the yum repo definition for the given base url.
    repo_file = "[rhel-updates]\nname=rhel6-updates\nbaseurl=" + \
        args.repo_url + "\nenabled=1\ngpgcheck=0"
    print('Create update repo file')
    status, out = client.run_command(
        'echo "%s" >/etc/yum.repos.d/rhel_updates.repo' % repo_file)

    # Run the update itself; gpg checking is off since these are test repos.
    print('Running rhel updates...')
    status, out = client.run_command('yum update -y --nogpgcheck')
    print("\n" + out + "\n")
    if status != 0:
        print("ERROR during update")
        sys.exit(1)

    if args.reboot:
        print('Appliance reboot')
        status, out = client.run_command('reboot')
    else:
        print('A reboot is recommended.')
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    import fauxfactory
    # BUG FIX: the original called 'cfmeappliance'.format(...) — no placeholder,
    # so the random suffix was silently discarded and every appliance got the
    # same hostname. '{}' keeps the suffix, making the FQDN unique per run.
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = appliance.IPAppliance().address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    ipaserver_ssh = SSHClient(**connect_kwargs)
    # updating the /etc/hosts is a workaround due to the
    # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
    command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
    ipaserver_ssh.run_command(command)
    ipaserver_ssh.close()
    ssh = SSHClient()
    # Register the appliance's own hostname first so IPA enrolment can resolve it.
    rc, out = ssh.run_command('appliance_console_cli --host {}'.format(appliance_fqdn))
    assert rc == 0, out
    ssh.run_command('echo "127.0.0.1\t{}" > /etc/hosts'.format(appliance_fqdn))
    ensure_browser_open()
    login_admin()
    # Kerberos needs clocks in sync; point NTP at the IPA server and let it settle.
    if data["ipaserver"] not in get_ntp_servers():
        set_ntp_servers(data["ipaserver"])
        sleep(120)
    auth = ExternalAuthSetting(get_groups=data.pop("get_groups", False))
    auth.setup()
    logout()
    # Merge the looked-up credentials into data for the format() call below.
    creds = credentials.get(data.pop("credentials"), {})
    data.update(**creds)
    rc, out = ssh.run_command(
        "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
        "--ipaprincipal {principal} --ipapassword {password}".format(**data)
    )
    assert rc == 0, out
    assert "failed" not in out.lower(), "External auth setup failed:\n{}".format(out)
    login_admin()
def main():
    """Precompile UI assets if needed and restart the CFME UI worker."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', nargs='?', default=None,
                            help='hostname or ip address of target appliance')
    args = arg_parser.parse_args()

    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
    }
    if args.address:
        ssh_kwargs['hostname'] = args.address

    # Init SSH client
    ssh_client = SSHClient(**ssh_kwargs)

    # Asset precompilation is only needed on versions newer than 5.2, and only
    # when the compiled assets directory is missing.
    needs_compile = (not ssh_client.get_version().startswith("5.2") and
                     ssh_client.run_command("ls /var/www/miq/vmdb/public/assets")[0] != 0)
    if needs_compile:
        ssh_client.run_rake_command("assets:precompile")
    ssh_client.run_rake_command("evm:restart")
    print("CFME UI worker restarted, UI should be available shortly")
def restart_appliance(address):
    """Restart evmserverd on ``address`` over SSH; exit the process on failure."""
    print('Restarting evmserverd on {}'.format(address))
    with SSHClient(hostname=address, **ssh_creds) as client:
        status, out = client.run_command('systemctl restart evmserverd')
    if status != 0:
        print("Restarting evmserverd failed on {}".format(address))
        sys.exit(1)
    print("Restarting succeeded on {}".format(address))
def main():
    """Apply (or with -R, revert) the ajax_wait patch to application.js on an appliance."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', help='hostname or ip address of target appliance')
    arg_parser.add_argument('-R', '--reverse', dest='reverse', action='store_true',
                            default=False, help='flag to indicate the patch should be undone')
    args = arg_parser.parse_args()

    # Locate the diff we will apply.
    patch_file_name = data_path_for_filename('ajax_wait.diff', scripts_path.strpath)

    # Work in a throwaway dir, cleaned up at interpreter exit.
    tmpdir = mkdtemp()
    atexit.register(shutil.rmtree, tmpdir)
    source = '/var/www/miq/vmdb/public/javascripts/application.js'
    target = os.path.join(tmpdir, 'application.js')

    # Init SSH client
    client = SSHClient(username=credentials['ssh']['username'],
                       password=credentials['ssh']['password'],
                       hostname=args.address)
    print('retriving appliance.js from appliance')
    client.get_file(source, target)

    os.chdir(tmpdir)
    # patch, level 4, patch direction (default forward), ignore whitespace,
    # don't output rejects
    direction = '-N -R' if args.reverse else '-N'
    exitcode = subprocess.call(
        'patch -p4 %s -l -r- < %s' % (direction, patch_file_name), shell=True)

    if exitcode == 0:
        # Only overwrite the remote file after patching succeeded locally.
        print('replacing appliance.js on appliance')
        client.put_file(target, source)
    else:
        print('not changing appliance')

    return exitcode
def main():
    """Create an updates repo on an appliance, yum update it, optionally reboot."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', help='hostname or ip address of target appliance')
    arg_parser.add_argument('repo_url', help='updates base url')
    arg_parser.add_argument('--reboot', action="store_true",
                            help='reboot after installation (required for proper operation)')
    args = arg_parser.parse_args()

    # Init SSH client
    client = SSHClient(username=credentials['ssh']['username'],
                       password=credentials['ssh']['password'],
                       hostname=args.address)

    # Repo definition written to the appliance's yum config.
    repo_file = ("[rhel-updates]\nname=rhel6-updates\nbaseurl=" +
                 args.repo_url + "\nenabled=1\ngpgcheck=0")
    print('Create update repo file')
    status, out = client.run_command(
        'echo "%s" >/etc/yum.repos.d/rhel_updates.repo' % repo_file)

    print('Running rhel updates...')
    status, out = client.run_command('yum update -y --nogpgcheck')
    print("\n" + out + "\n")
    if status != 0:
        print("ERROR during update")
        sys.exit(1)

    if args.reboot:
        print('Appliance reboot')
        status, out = client.run_command('reboot')
    else:
        print('A reboot is recommended.')
def main():
    """Initialize the internal DB on an appliance.

    Uses ``appliance_console_cli`` when present, otherwise uploads and runs the
    ``enable-internal-db.rbt`` ruby template.
    """
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', help='hostname or ip address of target appliance')
    arg_parser.add_argument('--region', default=0, type=int,
                            help='region to assign to the new DB')
    args = arg_parser.parse_args()

    client = SSHClient(username=credentials['ssh']['username'],
                       password=credentials['ssh']['password'],
                       hostname=args.address)

    print('Initializing Appliance Internal DB')
    if client.run_command('ls -l /bin/appliance_console_cli')[0] == 0:
        # NOTE(review): this branch hard-codes --region 1 and ignores
        # args.region, while the fallback branch below honors it — confirm
        # which is intended before changing either.
        status, out = client.run_command('appliance_console_cli --ca --region 1 --internal')
    else:
        rbt_repl = {
            'miq_lib': '/var/www/miq/lib',
            'region': args.region
        }
        # Find and load our rb template with replacements
        base_path = os.path.dirname(__file__)
        rbt = datafile.data_path_for_filename('enable-internal-db.rbt', base_path)
        rb = datafile.load_data_file(rbt, rbt_repl)
        # Ship the rendered script to /tmp, run it, then clean it up.
        remote_file = '/tmp/%s' % generate_random_string()
        client.put_file(rb.name, remote_file)
        status, out = client.run_command('ruby %s' % remote_file)
        client.run_command('rm %s' % remote_file)

    # Both branches report identically, so the check is shared here.
    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')
def main():
    """Register an updates repo on an appliance, run yum update, optionally reboot."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument("address", help="hostname or ip address of target appliance")
    arg_parser.add_argument("repo_url", help="updates base url")
    arg_parser.add_argument("--reboot", action="store_true",
                            help="reboot after installation (required for proper operation)")
    args = arg_parser.parse_args()

    # Init SSH client
    client = SSHClient(username=credentials["ssh"]["username"],
                       password=credentials["ssh"]["password"],
                       hostname=args.address)

    # yum repo definition pointed at the requested base url.
    repo_file = ("[rhel-updates]\nname=rhel6-updates\nbaseurl=" +
                 args.repo_url + "\nenabled=1\ngpgcheck=0")
    print("Create update repo file")
    status, out = client.run_command(
        'echo "%s" >/etc/yum.repos.d/rhel_updates.repo' % repo_file)

    print("Running rhel updates...")
    status, out = client.run_command("yum update -y --nogpgcheck")
    print("\n" + out + "\n")
    if status != 0:
        print("ERROR during update")
        sys.exit(1)

    if args.reboot:
        print("Appliance reboot")
        status, out = client.run_command("reboot")
    else:
        print("A reboot is recommended.")
def generate_version_files():
    """Fixture finalizer: after the test phase (post-yield), write version-info
    files (system, processes, gems, rpms) into each result directory of this
    test run.

    BUG FIX: the early ``return`` taken when a ``version_info`` dir already
    exists used to skip ``ssh_client.close()``, leaking the SSH connection.
    The close now lives in a ``finally`` so it runs on every exit path.
    """
    yield
    starttime = time.time()
    ssh_client = SSHClient()
    try:
        relative_path = os.path.relpath(str(results_path), str(os.getcwd()))
        relative_string = relative_path + '/{}*'.format(test_ts)
        directory_list = glob.glob(relative_string)
        for directory in directory_list:
            module_path = os.path.join(directory, 'version_info')
            if os.path.exists(str(module_path)):
                # Already generated for this run; nothing more to do.
                return
            os.mkdir(str(module_path))
            generate_system_file(ssh_client, module_path)
            generate_processes_file(ssh_client, module_path)
            generate_gems_file(ssh_client, module_path)
            generate_rpms_file(ssh_client, module_path)
        timediff = time.time() - starttime
        logger.info('Generated all version files in {}'.format(timediff))
    finally:
        ssh_client.close()
def fix_merkyl_workaround():
    """Workaround around merkyl not opening an iptables port for communication"""
    ssh = SSHClient()
    # Only act when the merkyl init script is actually installed.
    if ssh.run_command('test -f /etc/init.d/merkyl').rc != 0:
        return
    logger.info('Rudely overwriting merkyl init.d on appliance;')
    local_file = data_path.join("bundles").join("merkyl").join("merkyl")
    ssh.put_file(local_file.strpath, "/etc/init.d/merkyl")
    ssh.run_command("service merkyl restart")
def main():
    """Export an automate domain from an appliance, clear its read-only flag,
    and re-import it under a new name.

    BUG FIX: the sed that flips ``system: true`` to ``system: false`` used a
    hard-coded ``/tmp/miq/ManageIQ/__domain__.yaml`` path, so exporting any
    domain other than the default 'ManageIQ' left the read-only flag in place.
    The path now uses ``args.source``.
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('source', nargs='?', default='ManageIQ', help='Source Domain name')
    parser.add_argument('dest', nargs='?', default='Default', help='Destination Domain name')
    parser.add_argument('username', nargs='?', default=credentials['ssh']['username'],
                        help='SSH username for target appliance')
    parser.add_argument('password', nargs='?', default=credentials['ssh']['password'],
                        help='SSH password for target appliance')
    args = parser.parse_args()
    ssh_kwargs = {
        'username': args.username,
        'password': args.password
    }
    if args.hostname is not None:
        ssh_kwargs['hostname'] = args.hostname
    client = SSHClient(stream_output=True, **ssh_kwargs)
    # Make sure the working dir exists
    client.run_command('mkdir -p /tmp/miq')
    print('Exporting domain...')
    export_opts = 'DOMAIN={} EXPORT_DIR=/tmp/miq PREVIEW=false OVERWRITE=true'.format(args.source)
    export_cmd = 'evm:automate:export {}'.format(export_opts)
    print(export_cmd)
    client.run_rake_command(export_cmd)
    # Clear the exported domain's read-only ("system") flag so it can be imported.
    ro_fix_cmd = ("sed -i 's/system: true/system: false/g' "
                  "/tmp/miq/{}/__domain__.yaml".format(args.source))
    client.run_command(ro_fix_cmd)
    import_opts = 'DOMAIN={} IMPORT_DIR=/tmp/miq PREVIEW=false'.format(args.source)
    import_opts += ' OVERWRITE=true IMPORT_AS={}'.format(args.dest)
    import_cmd = 'evm:automate:import {}'.format(import_opts)
    print(import_cmd)
    client.run_rake_command(import_cmd)
def test_verify_revert_snapshot(test_vm, provider_key, provider_type, provider_data,
                                soft_assert, register_event, request):
    """Tests revert snapshot

    Creates a marker file before each snapshot; after reverting to the first
    snapshot, the second marker file must be gone.

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider_crud.get_mgmt_system().get_ip_address(snapshot1.vm.name)
    # py2/py3-compatible form of the original py2 `print ip` statement
    print(ip)
    ssh_kwargs = {
        'username': credentials[provider_data['full_template']['creds']]['username'],
        'password': credentials[provider_data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh = SSHClient(**ssh_kwargs)
    ssh.run_command('touch snapshot1.txt')
    snapshot1.create()
    # snapshot2.txt is created after snapshot1, so reverting must remove it.
    ssh.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout=720)
    register_event(
        test_vm.provider_crud.get_yaml_data()['type'],
        "vm", test_vm.name, ["vm_power_on_req", "vm_power_on"])
    test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False)
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider_crud})
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(
        test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
        "vm not running")
    client = SSHClient(**ssh_kwargs)
    request.addfinalizer(test_vm.delete_from_provider)
    try:
        # BUG FIX: run_command returns (exit_status, output); the original indexed
        # [1] (the output) and compared it to 0, which can never be true. Success
        # means snapshot2.txt is absent, i.e. `test -e` exits nonzero.
        wait_for(lambda: client.run_command('test -e snapshot2.txt')[0] != 0,
                 fail_condition=False)
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # was a bare `except:` which would also swallow KeyboardInterrupt/SystemExit
        logger.info('Revert to snapshot %s Failed', snapshot1.name)
def main():
    """Pull an installed-RPM listing and a compressed log archive off an appliance."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('address', help='hostname or ip address of target appliance')
    args = arg_parser.parse_args()

    # Init SSH client
    client = SSHClient(username=credentials['ssh']['username'],
                       password=credentials['ssh']['password'],
                       hostname=args.address)

    # generate installed rpm list
    client.run_command('rpm -qa | sort > /tmp/installed_rpms.txt')
    client.get_file('/tmp/installed_rpms.txt', 'installed_rpms.txt')

    # compress logs dir
    client.run_command('cd /var/www/miq/vmdb; tar zcvf /tmp/appliance_logs.tgz log')
    client.get_file('/tmp/appliance_logs.tgz', 'appliance_logs.tgz')
def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request):
    """Tests revert snapshot

    Creates a marker file before each snapshot; after reverting to the first
    snapshot, the second marker file must be gone.

    Metadata:
        test_flag: snapshot, provision
    """
    snapshot1 = new_snapshot(test_vm)
    ip = snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name)
    ssh_kwargs = {
        'username': credentials[provider.data['full_template']['creds']]['username'],
        'password': credentials[provider.data['full_template']['creds']]['password'],
        'hostname': ip
    }
    ssh = SSHClient(**ssh_kwargs)
    ssh.run_command('touch snapshot1.txt')
    snapshot1.create()
    # snapshot2.txt is created after snapshot1, so reverting must remove it.
    ssh.run_command('touch snapshot2.txt')
    snapshot2 = new_snapshot(test_vm)
    snapshot2.create()
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh)
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=720)
    register_event('VmOrTemplate', test_vm.name, ['request_vm_start', 'vm_start'])
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False)
    navigate_to(test_vm.provider, 'Details')
    test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(
        test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm not running")
    client = SSHClient(**ssh_kwargs)
    try:
        # BUG FIX: run_command returns (exit_status, output); the original indexed
        # [1] (the output) and compared it to 0, which can never be true. Success
        # means snapshot2.txt is absent, i.e. `test -e` exits nonzero.
        wait_for(lambda: client.run_command('test -e snapshot2.txt')[0] != 0,
                 fail_condition=False)
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # was a bare `except:` which would also swallow KeyboardInterrupt/SystemExit
        logger.info('Revert to snapshot %s Failed', snapshot1.name)
def main():
    """Reset the appliance DB, restart evmserverd, then wait for the UI to come up."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('hostname', nargs='?', default=None,
                            help='hostname or ip address of target appliance')
    arg_parser.add_argument('username', nargs='?', default=credentials['ssh']['username'],
                            help='SSH username for target appliance')
    arg_parser.add_argument('password', nargs='?', default=credentials['ssh']['password'],
                            help='SSH password for target appliance')
    args = arg_parser.parse_args()

    ssh_kwargs = {
        'username': args.username,
        'password': args.password
    }
    if args.hostname is not None:
        ssh_kwargs['hostname'] = args.hostname

    client = SSHClient(stream_output=True, **ssh_kwargs)

    # `service evmserverd stop` is a little slow, and we're destroying the
    # db, so rudely killing ruby speeds things up significantly
    print('Stopping ruby processes...')
    client.run_command('killall ruby')
    client.run_rake_command('evm:db:reset')
    client.run_command('service evmserverd start')

    print('Waiting for appliance UI...')
    wait_cmd = [
        scripts_path.join('wait_for_appliance_ui.py').strpath,
        # SSHClient has the smarts to get our hostname if none was provided
        # Soon, utils.appliance.Appliance will be able to do all of this
        # and this will be made good
        'http://%s' % client._connect_kwargs['hostname']
    ]
    return subprocess.call(wait_cmd)
def list_orphaned_files_per_host(host_name, host_datastore_urls, provider_key,
                                 vm_registered_files, unregistered_files):
    """Scan a host's datastores over SSH and report files not belonging to a
    registered VM.

    Appends each orphan's path to ``unregistered_files`` (mutated in place)
    and prints a tab-separated line per orphan: host, path, detected type,
    file count. Returns ``False`` on any error (logged), ``None`` otherwise.
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        hostname = [host['name'] for host in hosts if host_name in host['name']]
        # check if hostname returned is ipaddress
        if not hostname:
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', host_name)
        connect_kwargs = {
            'username': credentials['host_default']['username'],
            'password': credentials['host_default']['password'],
            'hostname': hostname[0]
        }
        for datastore_url in host_datastore_urls:
            # Strip the 'ds:' style scheme prefix to get the on-host path.
            datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
            ssh_client = SSHClient(**connect_kwargs)
            command = 'ls ~/{}'.format(datastore_path[0])
            exit_status, output = ssh_client.run_command(command)
            # NOTE(review): run_command is called again after this close() —
            # presumably SSHClient reconnects lazily; confirm against utils.ssh.
            ssh_client.close()
            files_in_datastore = output.splitlines() if exit_status == 0 else []
            for fil in files_in_datastore:
                if fil not in vm_registered_files:
                    file_type = 'UNKNOWN'
                    number_of_files = 0
                    # Exit status of `test -d` is echoed: '0' means directory.
                    command = 'test -d ~/{}/{}; echo $?'.format(datastore_path[0], fil)
                    exit_status, output = ssh_client.run_command(command)
                    ssh_client.close()
                    file_extension = re.findall(r'.*\.(\w*)', fil)
                    if file_extension:
                        file_type = file_extension[0]
                        number_of_files = 1
                    if int(output.strip()) == 0:
                        # It is a directory: count entries and classify by the
                        # kind of VM files found inside.
                        command = 'ls ~/{}/{} | wc -l'.format(datastore_path[0], fil)
                        exit_status, output = ssh_client.run_command(command)
                        number_of_files = output.strip()
                        command = 'find ~/{}/{} -name "*.vmx" | wc -l'.format(
                            datastore_path[0], fil)
                        vmx_status, vmx_output = ssh_client.run_command(command)
                        command = 'find ~/{}/{} -name "*.vmtx" | wc -l'.format(
                            datastore_path[0], fil)
                        vmtx_status, vmtx_output = ssh_client.run_command(command)
                        command = 'find ~/{}/{} -name "*.vmdk" | wc -l'.format(
                            datastore_path[0], fil)
                        vmdk_status, vmdk_output = ssh_client.run_command(command)
                        ssh_client.close()
                        if int(vmx_output.strip()) > 0:
                            file_type = 'VirtualMachine'
                        elif int(vmtx_output.strip()) > 0:
                            file_type = 'Template'
                        elif int(vmdk_output.strip()) > 0:
                            file_type = 'VMDK'
                            # delete_this = '~/' + datastore_path[0] + fil
                            # command = 'rm -rf {}'.format(delete_this)
                            # exit_status, output = ssh_client.run_command(command)
                            # logger.info(output)
                    file_path = '~/' + datastore_path[0] + fil
                    if file_path not in unregistered_files:
                        unregistered_files.append(file_path)
                        print('{}\t\t{}\t\t{}\t\t{}'.format(
                            hostname[0], file_path, file_type, number_of_files))
    except Exception as e:
        logger.error(e)
        return False
def connect_direct_lun_to_appliance(self, vm_name, disconnect):
    """Connects or disconnects the direct lun disk to an appliance.

    Args:
        vm_name: Name of the VM with the appliance.
        disconnect: If False, it will connect, otherwise it will disconnect
    """
    if "provider_key" in self.kwargs:
        provider_name = self.kwargs["provider_key"]
    else:
        raise TypeError("provider_key not supplied to the provider.")
    # check that the vm exists on the rhev provider, get the ip address if so
    try:
        vm = self.api.vms.get(vm_name)
        ip_addr = self.get_ip_address(vm_name)
    except:
        raise NameError("{} not found on {}".format(vm_name, provider_name))
    # check for direct lun definition on provider's cfme_data.yaml
    if "direct_lun" not in self.kwargs:
        raise ValueError("direct_lun key not in cfme_data.yaml under provider {}, exiting...".format(provider_name))
    # does the direct lun exist
    prov_data = self.kwargs
    dlun_name = prov_data["direct_lun"]["name"]
    dlun = self.api.disks.get(dlun_name)
    if dlun is None:
        # The shared iSCSI disk does not exist yet on the provider; build it.
        # Create the iSCSI storage connection:
        sc = params.StorageConnection()
        sc.set_address(prov_data["direct_lun"]["ip_address"])
        sc.set_type("iscsi")
        sc.set_port(int(prov_data["direct_lun"]["port"]))
        sc.set_target(prov_data["direct_lun"]["iscsi_target"])
        # Add the direct LUN disk:
        lu = params.LogicalUnit()
        lu.set_id(prov_data["direct_lun"]["iscsi_target"])
        lu.set_address(sc.get_address())
        lu.set_port(sc.get_port())
        lu.set_target(sc.get_target())
        storage = params.Storage()
        storage.set_type("iscsi")
        storage.set_logical_unit([lu])
        disk = params.Disk()
        disk.set_name(dlun_name)
        disk.set_interface("virtio")
        disk.set_type("iscsi")
        disk.set_format("raw")
        disk.set_lun_storage(storage)
        # Shareable so several VMs can attach the same LUN.
        disk.set_shareable(True)
        disk = self.api.disks.add(disk)
        dlun = self.api.disks.get(dlun_name)
    # add it
    if not disconnect:
        retries = 0
        # Up to 3 attempts: transient RHEV API failures are retried after 30s.
        while retries < 3:
            retries += 1
            direct_lun = params.Disk(id=dlun.id)
            try:
                # is the disk present and active?
                vm_disk_list = vm.get_disks().list()
                for vm_disk in vm_disk_list:
                    if vm_disk.name == dlun_name:
                        if vm_disk.active:
                            return
                        else:
                            vm_disk.activate()
                            return
                # if not present, add it and activate
                direct_lun = params.Disk(id=dlun.id)
                added_lun = vm.disks.add(direct_lun)
                added_lun.activate()
            except Exception as e:
                logger.error("Exception caught: %s" % str(e))
                if retries == 3:
                    logger.error("exhausted retries and giving up")
                    raise
                else:
                    logger.info("sleeping for 30s and retrying to connect direct lun")
                    time.sleep(30)
        # Init SSH client, run pvscan on the appliance
        # (makes the newly attached LUN visible to LVM inside the guest)
        ssh_kwargs = {
            "username": conf.credentials["ssh"]["username"],
            "password": conf.credentials["ssh"]["password"],
            "hostname": ip_addr,
        }
        client = SSHClient(**ssh_kwargs)
        status, out = client.run_command("pvscan", timeout=5 * 60)
    # remove it
    else:
        vm_dlun = vm.disks.get(name=dlun_name)
        if vm_dlun is None:
            return
        else:
            # Detach (rather than delete) so the shared LUN itself survives.
            detach = params.Action(detach=True)
            vm_dlun.delete(action=detach)
def get_current_version_string():
    """Returns string contents of /var/www/miq/vmdb/VERSION"""
    status, version_text = SSHClient().run_command('cat /var/www/miq/vmdb/VERSION')
    return version_text.strip()