def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Check the availability of a port on ``addr`` from another machine (over SSH).

    Args:
        port: TCP port to probe (coerced to ``int``).
        addr: Address whose port is probed; defaults to this machine's IP.
        machine_addr: Host to run the probe from; defaults to the appliance
            behind ``store.base_url``.
        ssh_creds: Optional dict with ``username``/``password`` for
            ``machine_addr``; when absent, the current appliance's SSH client
            is reused.
        force: Re-probe even when a cached result exists in ``_ports``.

    Returns:
        bool: True when the port was reachable from ``machine_addr``.
    """
    from cfme.utils.ssh import SSHClient
    port = int(port)
    if not addr:
        addr = my_ip_address()
    # Results are cached per (addr, port); only probe on a cache miss or when forced.
    if port not in _ports[addr] or force:
        if not machine_addr:
            machine_addr = urlparse.urlparse(store.base_url).hostname
        if not ssh_creds:
            ssh_client = store.current_appliance.ssh_client
        else:
            ssh_client = SSHClient(
                hostname=machine_addr,
                username=ssh_creds['username'],
                password=ssh_creds['password']
            )
        with ssh_client:
            # on exception => fails with return code 1
            cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
            ret, out = ssh_client.run_command(cmd)
            # Exit code 0 means the connection attempt succeeded.
            _ports[addr][port] = ret == 0
    return _ports[addr][port]
def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
    """Checks the availability of a port from outside using another machine (over SSH)"""
    from cfme.utils.ssh import SSHClient
    port = int(port)
    addr = addr or my_ip_address()
    # Serve from the per-address cache unless forced to re-probe.
    if force or port not in _ports[addr]:
        machine_addr = machine_addr or store.current_appliance.hostname
        if ssh_creds:
            probe_client = SSHClient(hostname=machine_addr,
                                     username=ssh_creds['username'],
                                     password=ssh_creds['password'])
        else:
            probe_client = store.current_appliance.ssh_client
        with probe_client:
            # on exception => fails with return code 1
            cmd = '''python2 -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
            probe_result = probe_client.run_command(cmd)
            _ports[addr][port] = probe_result.success
    return _ports[addr][port]
def disable_forgery_protection():
    """Fixture: disable CSRF forgery protection for the test, restore it afterwards.

    Each toggle rewrites production.rb over SSH and restarts evmserverd.
    """
    def _toggle(old_value, new_value, direction):
        # Flip the flag in production.rb and bounce evmserverd so it takes effect.
        start = time.time()
        with SSHClient() as ssh_client:
            logger.info('Turning {} "allow_forgery_protection"'.format(direction))
            ssh_client.run_command(
                "sed -i 's/allow_forgery_protection = {}/allow_forgery_protection = {}/' "
                "/var/www/miq/vmdb/config/environments/production.rb".format(
                    old_value, new_value))
            ssh_client.run_command("service evmserverd restart")
        elapsed = time.time() - start
        logger.info('Turned {} "allow_forgery_protection" in: {}'.format(direction, elapsed))

    _toggle('true', 'false', 'off')
    yield
    _toggle('false', 'true', 'on')
def db_restore(temp_appliance_extended_db):
    """Restore a version-matched DB backup onto the appliance and bring the UI back up.

    Fetches the backup from the bottlenecks storage host, restores it, runs
    migrations (required when loading an older-version database) and waits
    for the web UI.
    """
    app = temp_appliance_extended_db
    # get app version for backup
    ver = str(temp_appliance_extended_db.version).replace('.', '_')
    # Keep only the "major_minor" prefix; the length guard avoids an
    # IndexError on short versions such as "5_9".
    ver = ver[:3] if len(ver) > 3 and ver[3] == '_' else ver[:4]
    # get DB backup file
    db_storage_hostname = conf.cfme_data.bottlenecks.hostname
    db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials.bottlenecks)
    rand_filename = "/tmp/db.backup_{}".format(fauxfactory.gen_alphanumeric())
    db_storage_ssh.get_file("{}/db.backup_{}".format(
        conf.cfme_data.bottlenecks.backup_path, ver), rand_filename)
    app.ssh_client.put_file(rand_filename, "/tmp/evm_db.backup")
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    app.db.restore()
    # When you load a database from an older version of the application, you always need to
    # run migrations.
    # https://bugzilla.redhat.com/show_bug.cgi?id=1643250
    app.db.migrate()
    app.db.fix_auth_key()
    app.db.fix_auth_dbyml()
    app.evmserverd.start()
    app.wait_for_web_ui()
def db_restore(temp_appliance_extended_db):
    """Restore a version-matched DB backup onto the appliance and bring the UI back up.

    Fetches the backup from the bottlenecks storage host, restores it, runs
    migrations (required when loading an older-version database) and waits
    for the web UI.
    """
    app = temp_appliance_extended_db
    # get app version for backup
    ver = str(temp_appliance_extended_db.version).replace('.', '_')
    # Keep only the "major_minor" prefix; the length guard avoids an
    # IndexError on short versions such as "5_9".
    ver = ver[:3] if len(ver) > 3 and ver[3] == '_' else ver[:4]
    # get DB backup file
    db_storage_hostname = conf.cfme_data.bottlenecks.hostname
    db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials.bottlenecks)
    rand_filename = f"/tmp/db.backup_{fauxfactory.gen_alphanumeric()}"
    db_storage_ssh.get_file(
        "{}/db.backup_{}".format(conf.cfme_data.bottlenecks.backup_path, ver),
        rand_filename)
    app.ssh_client.put_file(rand_filename, "/tmp/evm_db.backup")
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    app.db.restore()
    # When you load a database from an older version of the application, you always need to
    # run migrations.
    # https://bugzilla.redhat.com/show_bug.cgi?id=1643250
    app.db.migrate()
    app.db.fix_auth_key()
    app.db.fix_auth_dbyml()
    app.evmserverd.start()
    app.wait_for_web_ui()
def setup_nfs_samba_backup(appl1):
    """Pull the DB backup off the first appliance and push it to the NFS/Samba share."""
    share_host = cfme_data['network_share']['hostname']
    share_path = cfme_data['network_share']['nfs_path']
    share_ssh = SSHClient(
        hostname=share_host,
        username=credentials['ssh']['username'],
        password=credentials['depot_credentials']['password'],
    )
    # Stage the dump locally under a unique name before uploading it.
    local_dump = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
    appl1.ssh_client.get_file("/tmp/evm_db.backup", local_dump)
    share_ssh.put_file(local_dump, "{}share.backup".format(share_path))
def update_db_yaml(address):
    """Re-encrypt the DB password on the appliance and write it into database.yml.

    Exits the process with status 1 when either the rails encryption call or
    the database.yml edit fails.
    """
    with SSHClient(hostname=address, **ssh_creds) as client:
        # NOTE(review): each run_command is a separate shell, so this cd does
        # not affect later commands — they cd explicitly themselves.
        client.run_command('cd /var/www/miq/vmdb')
        # TODO Issue 8595, MiqPassword alias/gem will go away
        # IPAppliance.password_gem property version picks the gem name
        # We only have an address here, will have to look for the gem.
        result = client.run_rails_command(
            '\'puts MiqPassword.encrypt("smartvm");\'')
        if result.failed:
            print('Retrieving encrypted db password failed on {}'.format(
                address))
            sys.exit(1)
        else:
            encrypted_pass = result.output
            # Replace the password line in-place, keeping a dated backup of
            # the original file (sed -i.<suffix>). The previous pattern was
            # corrupted by a redaction placeholder and could not match.
            result = client.run_command(
                ('cd /var/www/miq/vmdb; '
                 'sed -i.`date +%m-%d-%Y` "s/password: .*/password: {}/g" '
                 'config/database.yml'.format(re.escape(encrypted_pass))))
            if result.failed:
                print('Updating database.yml failed on {}'.format(address))
                print(result.output)
                sys.exit(1)
            else:
                print('Updating database.yml succeeded on {}'.format(
                    address))
def make_ssh_client(ssh_host, ssh_user, ssh_pass):
    """Return an SSHClient for the given host and credentials."""
    return SSHClient(hostname=ssh_host, username=ssh_user, password=ssh_pass)
def main():
    """Collect an installed-RPM list and a compressed log archive from an appliance.

    Takes the target address on the command line and downloads
    ``installed_rpms.txt`` and ``appliance_logs.tgz`` into the current
    directory.
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()
    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }
    # Init SSH client
    with SSHClient(**ssh_kwargs) as ssh_client:
        # generate installed rpm list (results were previously unpacked into
        # unused locals; the return value is not needed here)
        ssh_client.run_command('rpm -qa | sort > /tmp/installed_rpms.txt')
        ssh_client.get_file('/tmp/installed_rpms.txt', 'installed_rpms.txt')
        # compress logs dir
        ssh_client.run_command(
            'cd /var/www/miq/vmdb; '
            'tar zcvf /tmp/appliance_logs.tgz log')
        ssh_client.get_file('/tmp/appliance_logs.tgz', 'appliance_logs.tgz')
def db_restore(temp_appliance_extended_db):
    """Restore a bottlenecks DB dump (plus v2_key/REGION/GUID) onto the appliance."""
    app = temp_appliance_extended_db
    app.evmserverd.stop()
    app.db.drop()
    db_storage_hostname = conf.cfme_data['bottlenecks']['hostname']
    db_storage_ssh = SSHClient(hostname=db_storage_hostname,
                               **conf.credentials['bottlenecks'])
    # Different files for different versions
    ver = "_58" if temp_appliance_extended_db.version < '5.9' else "_59"
    backup_dir = "/home/backups/otsuman_db_bottlenecks"
    # (remote basename, local scratch prefix, destination path on the appliance)
    transfers = [
        ("v2_key", "/tmp/v2_key_", "/var/www/miq/vmdb/certs/v2_key"),
        ("db.backup", "/tmp/db_dump_", "/tmp/evm_db.backup"),
        ("REGION", "/tmp/REGION_", "/var/www/miq/vmdb/REGION"),
        ("GUID", "/tmp/GUID_", "/var/www/miq/vmdb/GUID"),
    ]
    local_paths = []
    with db_storage_ssh as ssh_client:
        # Stage every artifact locally under a unique name.
        for remote_name, local_prefix, _ in transfers:
            local_path = "{}{}".format(local_prefix, fauxfactory.gen_alphanumeric())
            ssh_client.get_file("{}/{}{}".format(backup_dir, remote_name, ver), local_path)
            local_paths.append(local_path)
    with app.ssh_client as ssh_client:
        # Push the staged files to their destinations on the appliance.
        for (_, _, destination), local_path in zip(transfers, local_paths):
            ssh_client.put_file(local_path, destination)
    app.db.restore()
    app.start_evm_service()
    app.wait_for_web_ui()
def ssh(provider, full_template, full_template_vm):
    """Yield an SSH client connected to the VM built from the full template."""
    creds = credentials[full_template.creds]
    vm_ip = provider.mgmt.get_ip_address(full_template_vm.name)
    with SSHClient(username=creds['username'],
                   password=creds['password'],
                   hostname=vm_ip) as ssh_client:
        yield ssh_client
def make_ssh_client(rhevip, sshname, sshpass):
    """Return an SSHClient for the given RHEV host and credentials."""
    return SSHClient(hostname=rhevip, username=sshname, password=sshpass)
def ssh_client(self):
    """Return an SSHClient bound to host1, authenticated with the bind password."""
    return SSHClient(
        hostname=self.host1,
        username="******",
        password=self.bind_password,
    )
def get_config_list(ssh_client: SSHClient):
    """Read eth0's network script over SSH and return its contents as a list of lines."""
    cat_result = ssh_client.run_command(
        'cat /etc/sysconfig/network-scripts/ifcfg-eth0')
    assert cat_result.success
    # split('\n') (not splitlines) so a trailing newline yields a final empty entry,
    # matching the original behavior.
    return cat_result.output.split('\n')
def setup_external_auth_openldap(**data):
    """Sets up the appliance for an external authentication with OpenLdap.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    # SSH credentials for the LDAP server host itself (not the appliance).
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaddress'],
    }
    current_appliance = get_or_create_current_appliance()
    # Random hostname so repeated runs do not collide in /etc/hosts on the server.
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = current_appliance.hostname
    appliance_fqdn = '{}.{}'.format(appliance_name, data['domain_name'])
    with SSHClient(**connect_kwargs) as ldapserver_ssh:
        # updating the /etc/hosts is a workaround due to the
        # https://bugzilla.redhat.com/show_bug.cgi?id=1360928
        command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
        ldapserver_ssh.run_command(command)
        # Fetch the LDAP TLS certificate so the appliance can trust the server.
        ldapserver_ssh.get_file(remote_file=data['cert_filepath'],
                                local_path=conf_path.strpath)
    # Switch the appliance to external auth through the UI, then apply the
    # OpenLDAP-specific appliance configuration.
    ensure_browser_open()
    current_appliance.server.login_admin()
    current_appliance.server.authentication.set_auth_mode(
        mode='external', get_groups=data.pop("get_groups", True))
    current_appliance.configure_appliance_for_openldap_ext_auth(appliance_fqdn)
    current_appliance.server.logout()
def connect(self, provider, instance):
    """Yield an SSH connection to *instance*, tunneled through a provider host.

    Waits until the statically-configured IP shows up on the instance, opens a
    session to a jump host from the provider config, and tunnels through it to
    whichever of the instance's IPs answers first.
    """
    static_mode_payload = self.payload
    # Wait for the static IP from the payload to actually appear on the instance.
    wait_for(lambda: static_mode_payload['ip_address'] in instance.all_ips, timeout='5m')
    # Any host that works can be used. To keep things simple, just pick the first one with
    # fingers crossed.
    jump_host_config = provider.data['hosts'][0]
    jump_host_creds = Credential.from_config(
        jump_host_config['credentials']['default'])
    jump_host_session = SSHClient(hostname=jump_host_config['name'],
                                  username=jump_host_creds.principal,
                                  password=jump_host_creds.secret)

    def _connection_factory(ip):
        # Build one tunneled connection attempt to the given instance IP.
        return jump_host_session.tunnel_to(
            hostname=ip,
            username='******',
            password=static_mode_payload['root_password'],
            timeout=ssh.CONNECT_TIMEOUT)
    # Cleanup this explicitly because we can get to problems with ordering the cleanups of
    # tunneled connections and the tunnels at the session end.
    # Note that the SSHClient.__exit__ does NOT close the connection.
    with closing(
            retry_connect(lambda: instance.all_ips, _connection_factory,
                          num_sec=ssh.CONNECT_RETRIES_TIMEOUT,
                          delay=ssh.CONNECT_SSH_DELAY)) as ssh_client:
        yield ssh_client
def list_templates(hostname, username, password, upload_folder):
    """Return the template directory names under ``upload_folder`` on the host.

    Returns:
        list: Directory basenames, or an empty list when the find fails or
        produces no output.
    """
    with SSHClient(hostname=hostname, username=username, password=password) as ssh:
        # Raw string so "\;" (find's -exec terminator) is not treated as an
        # (invalid) Python escape sequence.
        cmd = r'find {u}/* -maxdepth 1 -type d -exec basename {{}} \;'.format(u=upload_folder)
        result = ssh.run_command(cmd)
        if result.success and str(result):
            return str(result).split()
        return []
def utility_vm_ssh(utility_vm):
    """Yield an SSH client connected to the utility VM's responding IP."""
    vm, injected_user_cred, __ = utility_vm
    responding_ip = _pick_responding_ip(vm, 22)
    client = SSHClient(hostname=responding_ip,
                       username=injected_user_cred.principal,
                       password=injected_user_cred.secret)
    with client as ssh_client:
        yield ssh_client
def main():
    """Install and start the SNMP trap listener on a target appliance.

    Copies the listener scripts over SSH, makes them start on boot via
    /etc/rc.local, starts the listener, opens port 8765 in iptables, and
    finally probes the HTTP endpoint to confirm the listener is reachable.
    Exits with status 2 when the listener cannot be reached.
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    args = parser.parse_args()
    ssh_kwargs = {
        'username': credentials['ssh']['username'],
        'password': credentials['ssh']['password'],
        'hostname': args.address
    }
    # Init SSH client
    with SSHClient(**ssh_kwargs) as ssh_client:
        snmp_path = scripts_data_path.join("snmp")
        # Copy
        print("Copying files")
        ssh_client.put_file(
            snmp_path.join("snmp_listen.rb").strpath, "/root/snmp_listen.rb")
        ssh_client.put_file(
            snmp_path.join("snmp_listen.sh").strpath, "/root/snmp_listen.sh")
        # Enable after startup
        print("Enabling after startup")
        result = ssh_client.run_command(
            "grep 'snmp_listen[.]sh' /etc/rc.local")
        if result.failed:
            # Not registered yet — append the start command to rc.local.
            ssh_client.run_command(
                "echo 'cd /root/ && ./snmp_listen.sh start' >> /etc/rc.local")
        assert ssh_client.run_command("grep 'snmp_listen[.]sh' /etc/rc.local"
                                      ).success, ("Could not enable!")
        # Run!
        print("Starting listener")
        assert ssh_client.run_command("cd /root/ && ./snmp_listen.sh start"
                                      ).success, ("Could not start!")
        # Open the port if not opened
        print("Opening the port in iptables")
        result = ssh_client.run_command(
            "grep '--dport 8765' /etc/sysconfig/iptables")
        if result.failed:
            # append after the 5432 entry
            ssh_client.run_command(
                "sed -i '/--dport 5432/a -A INPUT -p tcp -m tcp --dport 8765 -j ACCEPT' "
                "/etc/sysconfig/iptables")
            ssh_client.run_command("systemctl restart iptables")
        # last ssh command, close
    # Check if accessible
    try:
        requests.get("http://{}:8765/".format(args.address))
    except requests.exceptions.ConnectionError:
        print("Could not detect running listener!")
        exit(2)
def check_ftp(appliance, ftp, server_name, server_zone_id, check_ansible_logs=False):
    """Validate collected log archives on the FTP depot for the given server.

    Checks that zip archives named after the server exist, that (on 5.11+)
    Models_/Dialogs_ archives are present, that the timestamps embedded in the
    file names parse and leave no negative gaps between consecutive archives,
    and — optionally — that ansible tower logs inside the archives are non-empty.
    """
    server_string = '{}_{}'.format(server_name, server_zone_id)
    with ftp:
        # Files must have been created after start with server string in it (for ex. EVM_1)
        # date_group matches the four "_<date>"-style segments appended to the name.
        date_group = '(_.*?){4}'
        zip_files = ftp.filesystem.search(re.compile(
            r"^.*{}{}[.]zip$".format(server_string, date_group)), directories=False)
        assert zip_files, "No logs found!"
        # Collection of Models and Dialogs introduced in 5.10 but it work only in 5.11 (BZ 1656318)
        if appliance.version >= "5.11" and not BZ(1706989).blocks:
            models_files = ftp.filesystem.search(re.compile(
                r"^Models_.*{}[.]zip$".format(server_string)), directories=False
            )
            assert models_files, 'No models files found'
            dialogs_files = ftp.filesystem.search(re.compile(
                r"^Dialogs_.*{}[.]zip$".format(server_string)), directories=False
            )
            assert dialogs_files, 'No dialogs files found'
        # Check the times of the files by names
        datetimes = []
        for zip_file in zip_files:
            # files looks like "Current_region_0_default_1_EVM_1_20170127_043343_20170127_051010.zip"
            # 20170127_043343 - date and time
            date = zip_file.name.split("_")
            date_from = date[7] + date[8]
            # removing ".zip" from last item
            date_to = date[9] + date[10][:-4]
            try:
                date_from = datetime.strptime(date_from, "%Y%m%d%H%M%S")
                date_to = datetime.strptime(date_to, "%Y%m%d%H%M%S")
                # if the file is correct, check ansible logs (~/ROOT/var/log/tower/setup-*) are there
                logs_ansible = "ROOT/var/log/tower/setup" if zip_file.name.startswith("Current") \
                    else "log/ansible_tower"
                # can't login as anon using SSH
                if ftp.login != 'anonymous' and check_ansible_logs:
                    with SSHClient(hostname=ftp.host,
                                   username=ftp.login,
                                   password=ftp.password) as log_ssh:
                        result = log_ssh.run_command(
                            "unzip -l ~{} | grep {}".format(zip_file.path, logs_ansible),
                            ensure_user=True)
                        assert '.log' in result.output
                        # unzip -l lists the size in the first column.
                        log_file_size = result.output.split()[0]
                        assert int(log_file_size) > 0, "Log file is empty!"
            except ValueError:
                # strptime (or int()) failed — the file name did not match the
                # expected layout.
                assert False, "Wrong file matching of {}".format(zip_file.name)
            datetimes.append((date_from, date_to, zip_file.name))
        # Check for the gaps
        if len(datetimes) > 1:
            for i in range(len(datetimes) - 1):
                # Start of the next archive must not precede the end of the previous one.
                dt = datetimes[i + 1][0] - datetimes[i][1]
                assert dt.total_seconds() >= 0.0, (
                    "Negative gap between log files ({}, {})".format(
                        datetimes[i][2], datetimes[i + 1][2])
                )
def utility_vm_ssh(utility_vm):
    """Yield an SSH client connected to whichever utility-VM IP responds on port 22."""
    vm, injected_user_cred, __ = utility_vm
    responding_ip = pick_responding_ip(
        lambda: vm.all_ips, 22, IP_PICK_TIMEOUT, ROUNDS_DELAY, ATTEMPT_TIMEOUT)
    client = SSHClient(hostname=responding_ip,
                       username=injected_user_cred.principal,
                       password=injected_user_cred.secret)
    with client as ssh_client:
        yield ssh_client
def restart_appliance(address):
    """Restart evmserverd on the appliance over SSH; exit(1) on failure."""
    print('Restarting evmserverd on {}'.format(address))
    with SSHClient(hostname=address, **ssh_creds) as client:
        status, out = client.run_command('systemctl restart evmserverd')
    # Guard clause: bail out of the whole script when the restart failed.
    if status != 0:
        print("Restarting evmserverd failed on {}".format(address))
        sys.exit(1)
    print("Restarting succeeded on {}".format(address))
def restart_appliance(address):
    """Restart evmserverd on the appliance over SSH; exit(1) on failure."""
    print(f'Restarting evmserverd on {address}')
    with SSHClient(hostname=address, **ssh_creds) as client:
        restart_result = client.run_command('systemctl restart evmserverd')
    # Guard clause: bail out of the whole script when the restart failed.
    if restart_result.failed:
        print(f"Restarting evmserverd failed on {address}")
        sys.exit(1)
    print(f"Restarting succeeded on {address}")
def make_ssh_client(provider_mgmt):
    """Return an SSHClient for the provider, using its configured ssh_creds key."""
    creds = credentials[provider_mgmt.kwargs.get('ssh_creds')]
    return SSHClient(
        hostname=provider_mgmt.kwargs.get('ipaddress'),
        username=creds['username'],
        password=creds['password'],
    )
def get_ssh_client(hostname, credentials):
    """ Returns fresh ssh client connected to given server using given credentials """
    # Run the value through urlparse so anything besides the network location
    # (scheme, path, ...) the caller passed along is stripped off.
    netloc = urlparse('scheme://{}'.format(hostname)).netloc
    return SSHClient(hostname=netloc,
                     username=credentials['username'],
                     password=credentials['password'])
def update_password(address):
    """Reset the appliance DB password via fix_auth.rb; exit(1) on failure."""
    with SSHClient(hostname=address, **ssh_creds) as client:
        result = client.run_command(
            'ruby /var/www/miq/vmdb/tools/fix_auth.rb --hostname localhost --password smartvm')
    # Guard clause: abort the script when the tool failed.
    if result.failed:
        print('Updating DB password failed on {}'.format(address))
        print(result.output)
        sys.exit(1)
    print('DB password updated on {}'.format(address))
def checks(self, ssh_client: SSHClient, config_list: List[str] = None):
    """Verify the cloud-init payload was applied: BOOTPROTO setting and hostname."""
    cfg_lines = self.get_config_list(ssh_client) if config_list is None else config_list
    # Compare contents of network script with cloud-init payload
    assert f'BOOTPROTO={self.bootproto}' in cfg_lines
    # Check that correct hostname has been set
    hostname_result = ssh_client.run_command('hostname')
    assert hostname_result.success
    assert hostname_result.output.strip() == self.payload['hostname']
def db_restore(temp_appliance_extended_db):
    """Restore a version-matched DB backup from the bottlenecks storage host."""
    app = temp_appliance_extended_db
    # get app version for backup
    ver = str(temp_appliance_extended_db.version).replace('.', '_')
    # Keep only the "major_minor" prefix; the length guard avoids an
    # IndexError on short versions such as "5_9".
    ver = ver[:3] if len(ver) > 3 and ver[3] == '_' else ver[:4]
    # get db backup file
    db_storage_hostname = conf.cfme_data.bottlenecks.hostname
    db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials.bottlenecks)
    rand_filename = "/tmp/db.backup_{}".format(fauxfactory.gen_alphanumeric())
    db_storage_ssh.get_file("{}/db.backup_{}".format(
        conf.cfme_data.bottlenecks.backup_path, ver), rand_filename)
    app.ssh_client.put_file(rand_filename, "/tmp/evm_db.backup")
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    app.db.restore()
    app.db.fix_auth_key()
    app.db.fix_auth_dbyml()
    app.evmserverd.start()
    app.wait_for_web_ui()
def db_restore(temp_appliance_extended_db):
    """Restore a version-matched DB backup from the bottlenecks storage host."""
    app = temp_appliance_extended_db
    # get app version for backup
    ver = str(temp_appliance_extended_db.version).replace('.', '_')
    # Keep only the "major_minor" prefix; the length guard avoids an
    # IndexError on short versions such as "5_9".
    ver = ver[:3] if len(ver) > 3 and ver[3] == '_' else ver[:4]
    # get db backup file
    db_storage_hostname = conf.cfme_data.bottlenecks.hostname
    db_storage_ssh = SSHClient(hostname=db_storage_hostname, **conf.credentials.bottlenecks)
    rand_filename = "/tmp/db.backup_{}".format(fauxfactory.gen_alphanumeric())
    db_storage_ssh.get_file("{}/db.backup_{}".format(
        conf.cfme_data.bottlenecks.backup_path, ver), rand_filename)
    app.ssh_client.put_file(rand_filename, "/tmp/evm_db.backup")
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    app.db.restore()
    app.db.fix_auth_key()
    app.db.fix_auth_dbyml()
    app.start_evm_service()
    app.wait_for_web_ui()
def setup_external_auth_ipa(**data):
    """Sets up the appliance for an external authentication with IPA.

    Keywords:
        get_groups: Get User Groups from External Authentication (httpd).
        ipaserver: IPA server address.
        iparealm: Realm.
        credentials: Key of the credential in credentials.yaml
    """
    connect_kwargs = {
        'username': credentials['host_default']['username'],
        'password': credentials['host_default']['password'],
        'hostname': data['ipaserver'],
    }
    current_appliance = get_or_create_current_appliance()
    # Random hostname so repeated runs do not collide on the IPA server.
    appliance_name = 'cfmeappliance{}'.format(fauxfactory.gen_alpha(7).lower())
    appliance_address = current_appliance.address
    appliance_fqdn = '{}.{}'.format(appliance_name, data['iparealm'].lower())
    with SSHClient(**connect_kwargs) as ipaserver_ssh:
        # Register the appliance FQDN on the IPA server, dropping any stale entry first.
        ipaserver_ssh.run_command('cp /etc/hosts /etc/hosts_bak')
        ipaserver_ssh.run_command(
            "sed -i -r '/^{}/d' /etc/hosts".format(appliance_address))
        command = 'echo "{}\t{}" >> /etc/hosts'.format(appliance_address, appliance_fqdn)
        ipaserver_ssh.run_command(command)
    with current_appliance.ssh_client as ssh:
        result = ssh.run_command(
            'appliance_console_cli --host {}'.format(appliance_fqdn)).success
        if not current_appliance.is_pod:
            assert result
        else:
            # appliance_console_cli fails when calls hostnamectl --host. it seems docker issue
            # raise BZ ?
            assert str(ssh.run_command('hostname')).rstrip() == appliance_fqdn
        ensure_browser_open()
        current_appliance.server.login_admin()
        # Point the appliance at the IPA server for NTP so Kerberos clock skew
        # does not break authentication.
        if data["ipaserver"] not in (
                current_appliance.server.settings.ntp_servers_form.values()):
            current_appliance.server.settings.update_ntp_servers(
                {'ntp_server_1': data["ipaserver"]})
            sleep(120)
        # BUGFIX: was `appliance.server...` — `appliance` is undefined in this
        # scope; the configured appliance is `current_appliance`.
        current_appliance.server.authentication.set_auth_mode(
            mode='external', get_groups=data.pop("get_groups", False))
        creds = credentials.get(data.pop("credentials"), {})
        data.update(**creds)
        assert ssh.run_command(
            "appliance_console_cli --ipaserver {ipaserver} --iparealm {iparealm} "
            "--ipaprincipal {principal} --ipapassword {password}".format(
                **data))
        current_appliance.server.login_admin()
def generate_version_files():
    """Teardown fixture: write version-info files into each timestamped results dir.

    After the test run, walks the result directories for the current test
    timestamp and dumps system/process/gem/rpm information gathered over SSH.
    """
    yield
    starttime = time.time()
    relative_path = os.path.relpath(str(results_path), str(os.getcwd()))
    relative_string = relative_path + '/{}*'.format(test_ts)
    directory_list = glob.glob(relative_string)
    # Context-manage the client so the connection is closed on every exit path
    # (the original leaked it on the early return below).
    with SSHClient() as ssh_client:
        for directory in directory_list:
            module_path = os.path.join(directory, 'version_info')
            if os.path.exists(str(module_path)):
                # NOTE(review): this aborts the whole teardown on the first
                # pre-existing version_info dir instead of skipping just that
                # directory — looks like it should be `continue`; behavior
                # preserved pending confirmation.
                return
            else:
                os.mkdir(str(module_path))
                generate_system_file(ssh_client, module_path)
                generate_processes_file(ssh_client, module_path)
                generate_gems_file(ssh_client, module_path)
                generate_rpms_file(ssh_client, module_path)
        timediff = time.time() - starttime
        logger.info('Generated all version files in {}'.format(timediff))
def get_worker_pid(worker_type):
    """Obtains the pid of the first worker with the worker_type specified"""
    command = ('systemctl status evmserverd 2> /dev/null | grep '
               '-m 1 \'{}\' | awk \'{{print $7}}\''.format(worker_type))
    with SSHClient() as ssh_client:
        exit_status, out = ssh_client.run_command(command)
    worker_pid = str(out).strip()
    if out:
        logger.info('Obtained {} PID: {}'.format(worker_type, worker_pid))
    else:
        logger.error('Could not obtain {} PID, check evmserverd running or if specific role is'
                     ' enabled...'.format(worker_type))
    # Fail loudly when no PID came back (no-op when `out` is truthy).
    assert out
    return worker_pid
def upload_template(hostname, username, password, provider, url, name, provider_data, stream,
                    upload_folder):
    """Download an appliance template onto an OpenShift host, register it in trackerbot.

    Returns False on failure (logged); the final log line is always emitted.
    """
    try:
        kwargs = {}
        if name is None:
            name = cfme_data['basic_info']['appliance_template']
        logger.info("OPENSHIFT:%r Start uploading Template: %r", provider, name)
        # NOTE(review): kwargs is always empty here, so this check validates
        # nothing — presumably a leftover from a shared template-upload
        # scaffold; confirm before removing.
        if not check_kwargs(**kwargs):
            return False
        if name not in list_templates(hostname, username, password, upload_folder):
            with SSHClient(hostname=hostname, username=username, password=password) as ssh:
                dest_dir = os.path.join(upload_folder, name)
                result = ssh.run_command('mkdir {dir}'.format(dir=dest_dir))
                if result.failed:
                    logger.exception("OPENSHIFT: cant create folder %r", str(result))
                    # NOTE(review): bare `raise` with no active exception raises
                    # RuntimeError, which the except below catches — works as a
                    # bail-out but an explicit exception would be clearer.
                    raise
                download_cmd = (
                    'wget -q --no-parent --no-directories --reject "index.html*" '
                    '--directory-prefix={dir} -r {url}')
                result = ssh.run_command(
                    download_cmd.format(dir=dest_dir, url=url))
                if result.failed:
                    logger.exception("OPENSHIFT: cannot upload template %r", str(result))
                    # NOTE(review): same bare-raise pattern as above.
                    raise
            if not provider_data:
                logger.info("OPENSHIFT:%r Adding template %r to trackerbot", provider, name)
                trackerbot.trackerbot_add_provider_template(
                    stream, provider, name)
        else:
            logger.info("OPENSHIFT:%r template %r already exists", provider, name)
    except Exception:
        logger.exception('OPENSHIFT:%r Exception during upload_template', provider)
        return False
    finally:
        # Always emitted, even on the failure/early-return paths above.
        logger.info("OPENSHIFT:%r End uploading Template: %r", provider, name)
def verify_revert_snapshot(full_test_vm, provider, soft_assert, register_event, request,
                           active_snapshot=False):
    """Create snapshots on the VM, revert to the first one, and verify over SSH.

    Creates a marker file, snapshots, creates a second marker, optionally takes
    a second snapshot, reverts to the first, and asserts that only the first
    marker file survived the revert.
    """
    if provider.one_of(RHEVMProvider):
        # RHV snapshots have only description, no name
        snapshot1 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot1 = new_snapshot(full_test_vm)
    full_template = getattr(provider.data.templates, 'full_template')
    # Define parameters of the ssh connection
    ssh_kwargs = {
        'hostname': snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name),
        'username': credentials[full_template.creds]['username'],
        'password': credentials[full_template.creds]['password']
    }
    ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm, it can take a while. Without
    # this wait, the ssh command would fail with 'port 22 not available' error.
    # Easiest way to solve this is just mask the exception with 'handle_exception = True'
    # and wait for successful completition of the ssh command.
    # The 'fail_func' ensures we close the connection that failed with exception.
    # Without this, the connection would hang there and wait_for would fail with timeout.
    # BUGFIX: pass the bound method (ssh_client.close), not its call result —
    # `ssh_client.close()` ran immediately and handed wait_for fail_func=None.
    wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').rc == 0, num_sec=400,
             delay=20, handle_exception=True, fail_func=ssh_client.close)
    # Create first snapshot
    snapshot1.create()
    ssh_client.run_command('touch snapshot2.txt')
    # If we are not testing 'revert to active snapshot' situation, we create another snapshot
    if not active_snapshot:
        if provider.one_of(RHEVMProvider):
            snapshot2 = new_snapshot(full_test_vm, has_name=False)
        else:
            snapshot2 = new_snapshot(full_test_vm)
        snapshot2.create()
    # VM on RHV provider must be powered off before snapshot revert
    if provider.one_of(RHEVMProvider):
        full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_OFF, cancel=False)
        full_test_vm.wait_for_vm_state_change(
            desired_state=full_test_vm.STATE_OFF, timeout=900)
    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(lambda: snapshot1.active, num_sec=300, delay=20,
             fail_func=provider.browser.refresh)
    # VM state after revert should be OFF
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_OFF, timeout=720)
    # Let's power it ON again
    full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_ON, cancel=False)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_ON, timeout=900)
    soft_assert(full_test_vm.provider.mgmt.is_vm_running(full_test_vm.name), "vm not running")
    # Wait for successful ssh connection (same fail_func fix as above)
    wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').rc == 0,
             num_sec=400, delay=20, handle_exception=True, fail_func=ssh_client.close)
    try:
        result = ssh_client.run_command('test -e snapshot1.txt')
        assert not result.rc
        result = ssh_client.run_command('test -e snapshot2.txt')
        assert result.rc
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        logger.exception('Revert to snapshot %s Failed', snapshot1.name)
    ssh_client.close()