def configure_second_admin_firewall(ip, network, netmask, interface,
                                    master_ip):
    # Allow input/forwarding for nodes from the second admin network and
    # enable source NAT for UDP (tftp) and HTTP (proxy server) traffic
    # on master node
    rules = [
        ('-I INPUT -i {0} -m comment --comment '
         '"input from 2nd admin network" -j ACCEPT').format(interface),
        ('-t nat -I POSTROUTING -s {0}/{1} -o e+ -m comment --comment '
         '"004 forward_admin_net2" -j MASQUERADE').format(network, netmask),
        ("-t nat -I POSTROUTING -o {0} -d {1}/{2} -p udp -m addrtype "
         "--src-type LOCAL -j SNAT --to-source {3}").format(
            interface, network, netmask, master_ip),
        ("-t nat -I POSTROUTING -d {0}/{1} -p tcp --dport 8888 -j SNAT "
         "--to-source {2}").format(network, netmask, master_ip),
        ('-I FORWARD -i {0} -m comment --comment '
         '"forward custom admin net" -j ACCEPT').format(interface)
    ]

    for rule in rules:
        cmd = 'iptables {0}'.format(rule)
        result = SSHManager().execute(ip=ip, cmd=cmd)
        assert_equal(result['exit_code'], 0,
                     ('Failed to add firewall rule for second admin net '
                      'on master node: {0}, {1}').format(rule, result))

    # Save new firewall configuration
    cmd = 'service iptables save'
    result = SSHManager().execute(ip=ip, cmd=cmd)
    assert_equal(result['exit_code'], 0,
                 ('Failed to save firewall configuration on master node:'
                  ' {0}').format(result))
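
# Usage sketch for the helper above (the IPs, network and interface name
# below are illustrative assumptions, not defaults of this module):
#
#     configure_second_admin_firewall(ip='10.109.0.2',
#                                     network='10.109.10.0',
#                                     netmask='255.255.255.0',
#                                     interface='enp0s8',
#                                     master_ip='10.109.10.2')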
def get_sha_sum(file_path):
    # NOTE: despite the function name, the checksum is computed with md5sum
    logger.debug('Get md5 of file {0}'.format(file_path))
    md5_sum = SSHManager().execute_on_remote(
        SSHManager().admin_ip,
        cmd='md5sum {0}'.format(file_path))['stdout_str'].strip()
    logger.info('MD5 is {0}'.format(md5_sum))
    return md5_sum
def get_full_filename(wildcard_name):
    cmd = 'ls {}'.format(wildcard_name)

    logger.info("Getting full file name for: {}".format(wildcard_name))

    full_pkg_name = SSHManager().execute_on_remote(
        ip=SSHManager().admin_ip,
        cmd=cmd)['stdout_str']
    return full_pkg_name
def update_rpm(path, rpm_cmd='/bin/rpm -Uvh --force'):
    cmd = '{rpm_cmd} {rpm_path}'.format(rpm_cmd=rpm_cmd, rpm_path=path)
    logger.info("Updating rpm '{0}'".format(path))
    try:
        SSHManager().execute(SSHManager().admin_ip, cmd)
        logger.info("Rpm '{0}' has been updated successfully".format(path))
    except Exception as ex:
        logger.error("Could not update rpm '{0}': {1}".format(path, ex))
        raise
def get_mongo_partitions(ip, device):
    # Moved from checkers.py for improvement of code
    ret = SSHManager().check_call(
        ip=ip,
        cmd="lsblk | grep {device} | awk {size}".format(
            device=device,
            size=re.escape('{print $4}')))['stdout']
    if not ret:
        logger.error("Partition not present! {partitions}".format(
            partitions=SSHManager().check_call(
                ip=ip,
                cmd="parted {device} print".format(device=device))))
        raise Exception(
            "Partition for device {0} not found".format(device))
    logger.debug("Partitions: {part}".format(part=ret))
    return ret
def get_ceph_partitions(ip, device, fs_type="xfs"):
    # Moved from checkers.py for improvement of code
    ret = SSHManager().check_call(
        ip=ip,
        cmd="parted {device} print | grep {type}".format(
            device=device,
            type=fs_type))['stdout']
    if not ret:
        logger.error("Partition not present! {partitions}".format(
            partitions=SSHManager().check_call(
                ip=ip,
                cmd="parted {device} print".format(device=device))))
        raise Exception(
            "Partition for device {0} not found".format(device))
    logger.debug("Partitions: {part}".format(part=ret))
    return ret
def restart_service(service_name, timeout=30):
    restart_cmd = 'service {} restart'.format(service_name)
    get_status_cmd = 'service {} status'.format(service_name)
    logger.info("Restarting service '{0}'".format(service_name))
    try:
        SSHManager().execute_on_remote(SSHManager().admin_ip, restart_cmd)
        helpers.wait(
            lambda: 'running' in SSHManager().execute_on_remote(
                SSHManager().admin_ip, get_status_cmd)['stdout_str'],
            timeout=timeout)
        logger.info("Service '{0}' has been restarted successfully".format(
            service_name))
    except Exception as ex:
        logger.error("Could not restart service '{0}': {1}".format(
            service_name, ex))
        raise
def connect_slaves_to_repo(environment, nodes, repo_name):
    repo_ip = environment.get_admin_node_ip()
    repo_port = '8080'
    repourl = 'http://{master_ip}:{repo_port}/{repo_name}/'.format(
        master_ip=repo_ip, repo_name=repo_name, repo_port=repo_port)
    if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_UBUNTU:
        cmds = [
            "echo -e '\ndeb {repourl} /' > "
            "/etc/apt/sources.list.d/{repo_name}.list".format(
                repourl=repourl, repo_name=repo_name),
            "apt-key add <(curl -s '{repourl}/Release.key') || :".format(
                repourl=repourl),
            # Set highest priority to all repositories located on master node
            "echo -e 'Package: *\nPin: origin {0}\nPin-Priority: 1060' > "
            "/etc/apt/preferences.d/custom_repo".format(
                environment.get_admin_node_ip()),
            "apt-get update"
        ]
    else:
        cmds = [
            "yum-config-manager --add-repo {url}".format(url=repourl),
            "echo -e 'gpgcheck=0\npriority=20' >> "
            "/etc/yum.repos.d/{ip}_{port}_{repo}_.repo".format(
                ip=repo_ip, repo=repo_name, port=repo_port),
            "yum -y clean all",
        ]

    for slave in nodes:
        for cmd in cmds:
            SSHManager().execute_on_remote(ip=slave['ip'], cmd=cmd)
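
# Usage sketch (hypothetical values): `nodes` is a list of nailgun node dicts
# carrying an 'ip' key, and 'custom_repo' is an illustrative name of a
# repository published on the master node:
#
#     connect_slaves_to_repo(environment, nodes, 'custom_repo')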
def generate_facts(ip):
    ssh_manager = SSHManager()
    facter_dir = '/var/lib/puppet/lib/facter'
    excluded_facts = ['naily.rb']

    if not ssh_manager.isdir_on_remote(ip, facter_dir):
        ssh_manager.mkdir_on_remote(ip, facter_dir)
        logger.debug('Directory {0} was created'.format(facter_dir))

    ssh_manager.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir))
    logger.debug('rb files were removed from {0}'.format(facter_dir))

    facts_files = ssh_manager.execute_on_remote(
        ip,
        'find /etc/puppet/modules/ -wholename "*/lib/facter/*.rb"')['stdout']
    facts_files = [i.strip() for i in facts_files]
    logger.debug('The following facts {0} will'
                 ' be copied to {1}'.format(facts_files, facter_dir))
    for fact in facts_files:
        if not fact or re.sub(r'.*/', '', fact) in excluded_facts:
            continue
        ssh_manager.execute_on_remote(
            ip, 'cp {0} {1}/'.format(fact, facter_dir))
    logger.debug('Facts were copied')

    ssh_manager.execute_on_remote(ip, 'facter -p -y > /tmp/facts.yaml')
    logger.info('Facts yaml was created')

    ssh_manager.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir))
    logger.debug('rb files were removed from {0}'.format(facter_dir))
def store_astute_yaml_for_one_node(nailgun_node):
    ssh_manager = SSHManager()
    if 'roles' not in nailgun_node:
        return None
    errmsg = 'Downloading "{0}.yaml" from the {1} failed'
    msg = 'File "{0}.yaml" was downloaded from the {1}'
    nodename = nailgun_node['name']
    ip = nailgun_node['ip']
    for role in nailgun_node['roles']:
        # NOTE: 'func_name' (the current test method name) is not defined in
        # this helper; it is assumed to come from the surrounding module/test
        # context
        filename = '{0}/{1}-{2}-{3}.yaml'.format(settings.LOGS_DIR, func_name,
                                                 nodename, role)
        if not ssh_manager.isfile_on_remote(ip, '/etc/{0}.yaml'.format(role)):
            role = 'primary-' + role
        if ssh_manager.download_from_remote(ip, '/etc/{0}.yaml'.format(role),
                                            filename):
            logger.info(msg.format(role, nodename))
        else:
            logger.error(errmsg.format(role, nodename))
        if settings.DOWNLOAD_FACTS:
            fact_filename = re.sub(r'-\w*\.', '-facts.', filename)
            generate_facts(ip)
            if ssh_manager.download_from_remote(ip, '/tmp/facts.yaml',
                                                fact_filename):
                logger.info(msg.format('facts', nodename))
            else:
                logger.error(errmsg.format('facts', nodename))
def check_emc_cinder_config(cls, ip, path):
    with SSHManager().open_on_remote(ip=ip, path=path) as f:
        cinder_conf = configparser.ConfigParser()
        cinder_conf.readfp(f)

    asserts.assert_equal(
        cinder_conf.get('DEFAULT', 'volume_driver'),
        'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
    asserts.assert_equal(
        cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
        'global')
    asserts.assert_false(
        cinder_conf.getboolean('DEFAULT', 'destroy_empty_storage_group'))
    asserts.assert_true(
        cinder_conf.getboolean('DEFAULT', 'initiator_auto_registration'))
    asserts.assert_equal(
        cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
    asserts.assert_equal(
        cinder_conf.getint('DEFAULT', 'default_timeout'), 10)
    asserts.assert_equal(cinder_conf.get('DEFAULT', 'naviseccli_path'),
                         '/opt/Navisphere/bin/naviseccli')

    asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
    asserts.assert_true(
        cinder_conf.has_option('DEFAULT', 'san_secondary_ip'))
    asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
    asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
def __init__(self):
    self.ssh_manager = SSHManager()
    self.ip = self.ssh_manager.admin_ip
    self.path_scripts = ('{0}/fuelweb_test/helpers/'
                         .format(os.environ.get("WORKSPACE", "./")))
    self.remote_path_scripts = '/tmp/'
    self.ubuntu_script = 'regenerate_ubuntu_repo'
    self.centos_script = 'regenerate_centos_repo'
    self.local_mirror_ubuntu = settings.LOCAL_MIRROR_UBUNTU
    self.local_mirror_centos = settings.LOCAL_MIRROR_CENTOS
    self.ubuntu_release = settings.UBUNTU_RELEASE
    self.centos_supported_archs = ['noarch', 'x86_64']
    self.pkgs_list = []

    self.custom_pkgs_mirror_path = ''
    if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
        # Trying to determine the root of Ubuntu repository
        pkgs_path = settings.CUSTOM_PKGS_MIRROR.split('/dists/')
        if len(pkgs_path) == 2:
            self.custom_pkgs_mirror = pkgs_path[0]
            self.custom_pkgs_mirror_path = '/dists/{}'.format(pkgs_path[1])
        else:
            self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR
    else:
        self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR
def replace_rpm_package(package):
    """Replace the package.rpm on the master node with the package.rpm
    from review.
    """
    ssh = SSHManager()
    logger.info("Patching {}".format(package))
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        # Upload package
        target_path = '/var/www/nailgun/{}/'.format(package)
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.UPDATE_FUEL_PATH.rstrip('/'),
            target=target_path)

        package_name = package
        package_ext = '*.noarch.rpm'
        pkg_path = os.path.join(target_path,
                                '{}{}'.format(package_name, package_ext))
        full_package_name = get_full_filename(wildcard_name=pkg_path)
        logger.debug('Package name is {0}'.format(full_package_name))
        full_package_path = os.path.join(os.path.dirname(pkg_path),
                                         full_package_name)

        # Update package on master node
        if not does_new_pkg_equal_to_installed_pkg(
                installed_package=package_name,
                new_package=full_package_path):
            update_rpm(path=full_package_path)

    except Exception:
        logger.error("Could not upload package")
        raise
def check_emc_cinder_config(cls, ip, path):
    command = 'cat {0}'.format(path)
    conf_data = SSHManager().execute_on_remote(ip, command)['stdout_str']
    conf_data = cStringIO(conf_data)
    cinder_conf = configparser.ConfigParser()
    cinder_conf.readfp(conf_data)

    asserts.assert_equal(
        cinder_conf.get('DEFAULT', 'volume_driver'),
        'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
    asserts.assert_equal(
        cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
        'global')
    asserts.assert_false(
        cinder_conf.getboolean('DEFAULT', 'destroy_empty_storage_group'))
    asserts.assert_true(
        cinder_conf.getboolean('DEFAULT', 'initiator_auto_registration'))
    asserts.assert_equal(
        cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
    asserts.assert_equal(
        cinder_conf.getint('DEFAULT', 'default_timeout'), 10)
    asserts.assert_equal(cinder_conf.get('DEFAULT', 'naviseccli_path'),
                         '/opt/Navisphere/bin/naviseccli')

    asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
    asserts.assert_true(
        cinder_conf.has_option('DEFAULT', 'san_secondary_ip'))
    asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
    asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
def get_nodes_tasks(node_id):
    """
    :param node_id: an integer number of node id
    :return: a set of deployment tasks for the corresponding node
    """
    tasks = set()
    ssh = SSHManager()

    result = ssh.execute_on_remote(ssh.admin_ip, "ls /var/log/astute")
    filenames = [filename.strip() for filename in result['stdout']]

    for filename in filenames:
        ssh.download_from_remote(
            ssh.admin_ip,
            destination="/var/log/astute/{0}".format(filename),
            target="/tmp/{0}".format(filename))

    data = fileinput.FileInput(
        files=["/tmp/{0}".format(filename) for filename in filenames],
        openhook=fileinput.hook_compressed)
    for line in data:
        if "Task time summary" in line \
                and "node {}".format(node_id) in line:
            # FIXME: define an exact search of task
            task_name = line.split("Task time summary: ")[1].split()[0]
            check = any([excluded_task in task_name
                         for excluded_task in TASKS_BLACKLIST])
            if check:
                continue
            tasks.add(task_name)
    return tasks
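
# Usage sketch (illustrative node id; TASKS_BLACKLIST is assumed to be a
# module-level list of task names to ignore):
#
#     tasks = get_nodes_tasks(node_id=1)
#     logger.info('Node 1 ran tasks: {}'.format(sorted(tasks)))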
def parse_pcs_status_xml(remote_ip):
    """Run 'pcs status xml' on the remote node and return its raw output.

    :param remote_ip: remote IP address
    :return: string with the XML output of 'pcs status xml'; the <nodes>
             section can be parsed by the caller
    """
    pcs_status_xml = SSHManager().execute_on_remote(
        remote_ip, 'pcs status xml')['stdout_str']
    return pcs_status_xml
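
# Parsing sketch for the value returned above (a minimal example using the
# standard library; it assumes the crm_mon-style XML that 'pcs status xml'
# emits, and element/attribute names may vary between pacemaker versions):
#
#     import xml.etree.ElementTree as ET
#     root = ET.fromstring(parse_pcs_status_xml(remote_ip))
#     nodes = {node.get('name'): node.attrib for node in root.iter('node')}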
def install_plugin_check_code(ip, plugin, exit_code=0):
    # Moved from checkers.py for improvement of code
    cmd = "cd /var && fuel plugins --install {0} ".format(plugin)
    chan, _, stderr, _ = SSHManager().execute_async_on_remote(ip=ip, cmd=cmd)
    logger.debug('Trying to read the status code from the channel...')
    assert_equal(
        chan.recv_exit_status(), exit_code,
        'Install script failed with the following message: {0}'.format(
            ''.join(stderr)))
def check_cinder_vmware_srv(self):
    """Verify cinder-vmware service."""
    ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        self.cluster_id, ["controller"])
    cmd = '. openrc; cinder-manage service list | grep vcenter | grep ":-)"'
    logger.debug('CMD: {}'.format(cmd))
    SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)
def get_file_size(ip, file_name, file_path):
    # Moved from checkers.py for improvement of code
    file_size = SSHManager().execute(
        ip, 'stat -c "%s" {0}/{1}'.format(file_path, file_name))
    assert_equal(
        int(file_size['exit_code']), 0,
        "Failed to get '{0}/{1}' file stats on"
        " remote node".format(file_path, file_name))
    return int(file_size['stdout'][0].rstrip())
def configure_second_admin_dhcp(ip, interface):
    dhcp_conf_file = '/etc/cobbler/dnsmasq.template'
    cmd = ("sed '0,/^interface.*/s//\\0\\ninterface={0}/' -i {1};"
           "cobbler sync").format(interface, dhcp_conf_file)
    result = SSHManager().execute(ip=ip, cmd=cmd)
    assert_equal(result['exit_code'], 0,
                 ('Failed to add second admin '
                  'network to DHCP server: {0}').format(result))
def _turnon_executable_ruby(node):
    """Set mode +x for /usr/bin/ruby

    :param node: dict, node attributes
    """
    ssh = SSHManager()
    cmd = 'chmod +x /usr/bin/ruby'
    ssh.execute_on_remote(node['ip'], cmd)
def check_package_origin(ip, package, origin):
    """Check that the given package was installed from the given repository"""
    version_cmd = ("apt-cache policy {package} | "
                   "awk '$1 == \"Installed:\" {{print $2}}'").format(
        package=package)
    version = SSHManager().execute_on_remote(ip, version_cmd)['stdout_str']
    origin_cmd = ("apt-cache madison {package} | "
                  "grep '{version}'").format(package=package,
                                             version=version)
    result = SSHManager().execute_on_remote(ip, origin_cmd)['stdout']
    # we only want to check for the UCA uri because it might be in main
    # or proposed
    repos = [line.split("|")[2].strip() for line in result]
    assert_true(
        any([origin in repo for repo in repos]),
        "Package {!r}: repository {!r} not found in {!r}".format(
            package, origin, repos)
    )
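
# Usage sketch (hypothetical values: a slave node IP, a package name and the
# repository URI expected for this deployment):
#
#     check_package_origin(ip='10.109.1.4',
#                          package='python-novaclient',
#                          origin='http://ubuntu-cloud.archive.canonical.com')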
def install_mos_repos():
    """
    Upload and install the fuel-release package with the MOS repositories
    description, and install the packages required for a packetary-based
    Fuel installation
    :return: nothing
    """
    logger.info("upload fuel-release package")
    if not settings.FUEL_RELEASE_PATH:
        raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
    try:
        ssh = SSHManager()
        pack_path = '/tmp/'
        full_pack_path = os.path.join(pack_path, 'fuel-release*.noarch.rpm')
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_RELEASE_PATH.rstrip('/'),
            target=pack_path)

        if settings.RPM_REPOS_YAML:
            with ssh.open_on_remote(
                    ip=ssh.admin_ip,
                    path='/etc/yum.repos.d/custom.repo',
                    mode="w") as f:
                f.write(generate_yum_repos_config(settings.RPM_REPOS_YAML))

        if settings.DEB_REPOS_YAML:
            ssh = SSHManager()
            pack_path = "/root/default_deb_repos.yaml"
            ssh.upload_to_remote(
                ip=ssh.admin_ip,
                source=settings.DEB_REPOS_YAML,
                target=pack_path)

    except Exception:
        logger.exception("Could not upload package")
        raise

    logger.debug("setup MOS repositories")
    cmd = "rpm -ivh {}".format(full_pack_path)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "yum install -y fuel-setup"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
def update_ostf():
    logger.info("Uploading new package from {0}".format(
        settings.UPDATE_FUEL_PATH))
    ssh = SSHManager()
    pack_path = '/var/www/nailgun/fuel-ostf/'
    full_pack_path = os.path.join(pack_path, 'fuel-ostf*.noarch.rpm')
    ssh.upload_to_remote(ssh.admin_ip,
                         source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                         target=pack_path)

    # Check old fuel-ostf package
    cmd = "rpm -q fuel-ostf"
    old_package = ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)['stdout_str']
    logger.info('Current package version of '
                'fuel-ostf: {0}'.format(old_package))

    cmd = "rpm -qp {0}".format(full_pack_path)
    new_package = ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)['stdout_str']
    logger.info('Package from review {0}'.format(new_package))

    if old_package == new_package:
        logger.info('Package {0} is installed'.format(new_package))
        return

    cmd = "service ostf stop"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
    cmd = "service ostf status"
    helpers.wait(lambda: "dead" in ssh.execute_on_remote(
        ssh.admin_ip, cmd=cmd, raise_on_assert=False,
        assert_ec_equal=[3])['stdout_str'], timeout=60)
    logger.info("OSTF status: inactive")

    cmd = "rpm -e fuel-ostf"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
    cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "rpm -q fuel-ostf"
    installed_package = ssh.execute_on_remote(ssh.admin_ip,
                                              cmd=cmd)['stdout_str']

    assert_equal(
        installed_package, new_package,
        "The new package {0} was not installed. Actual {1}".format(
            new_package, installed_package))

    cmd = "service ostf start"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
    cmd = "service ostf status"
    helpers.wait(lambda: "running" in ssh.execute_on_remote(
        ssh.admin_ip, cmd=cmd)['stdout_str'], timeout=60)

    cmd = "curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8777"
    helpers.wait(lambda: "401" in ssh.execute_on_remote(
        ssh.admin_ip, cmd=cmd, raise_on_assert=False)['stdout_str'],
        timeout=60)
    logger.info("OSTF status: RUNNING")
def get_os_packages(ip, packages_pattern=None):
    """Pick names of some OS packages from node"""
    if not packages_pattern:
        packages_pattern = ("neutron|nova|cinder|keystone|"
                            "ceilometer|ironic|glance")

    packages = SSHManager().execute_on_remote(
        ip, "dpkg-query -W -f '${{package}}\\n' | grep -E '{}'".format(
            packages_pattern))['stdout_str']
    return packages.split('\n')
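
# Usage sketch (hypothetical controller IP; the default pattern matches the
# main OpenStack service packages, and a custom pattern narrows the list):
#
#     all_os_pkgs = get_os_packages('10.109.1.4')
#     nova_pkgs = get_os_packages('10.109.1.4', packages_pattern='nova')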
def centos_setup_fuel(self, hostname):
    logger.info("upload fuel-release package")
    if not settings.FUEL_RELEASE_PATH:
        raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
    try:
        ssh = SSHManager()
        pack_path = '/tmp/'
        full_pack_path = os.path.join(pack_path, 'fuel-release*.noarch.rpm')
        ssh.upload_to_remote(ip=ssh.admin_ip,
                             source=settings.FUEL_RELEASE_PATH.rstrip('/'),
                             target=pack_path)
    except Exception:
        logger.exception("Could not upload package")

    logger.debug("Update host information")
    cmd = "echo HOSTNAME={} >> /etc/sysconfig/network".format(hostname)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "echo {0} {1} {2} >> /etc/hosts".format(
        ssh.admin_ip, hostname, settings.FUEL_MASTER_HOSTNAME)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "hostname {}".format(hostname)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    logger.debug("setup MOS repositories")
    cmd = "rpm -ivh {}".format(full_pack_path)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "yum install -y fuel-setup"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "yum install -y screen"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    logger.info("Install Fuel services")
    cmd = ("screen -dm bash -c 'showmenu=no wait_for_external_config=yes "
           "bootstrap_admin_node.sh'")
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    self.env.wait_for_external_config()
    self.env.admin_actions.modify_configs(self.env.d_env.router())
    self.env.kill_wait_for_external_config()

    self.env.wait_bootstrap()

    logger.debug("Check Fuel services")
    self.env.admin_actions.wait_for_fuel_ready()

    logger.debug("post-installation configuration of Fuel services")
    self.fuel_post_install_actions()
def delete_astute_log():
    """Delete astute.log file(s) on master node.

    This is to ensure that no unwanted tasks are used by tests
    (e.g. from previous deployments).

    :return: None
    """
    ssh = SSHManager()
    ssh.execute_on_remote(ssh.admin_ip, "rm /var/log/astute/astute*")
    ssh.execute_on_remote(ssh.admin_ip, "systemctl restart astute.service")
def does_new_pkg_equal_to_installed_pkg(installed_package, new_package):
    rpm_query_cmd = '/bin/rpm -q'
    current_version_cmd = '{rpm} {package}'.format(
        rpm=rpm_query_cmd, package=installed_package)
    urlfile_version_cmd = '{rpm} --package {package}'.format(
        rpm=rpm_query_cmd, package=new_package)

    logger.info("Comparing installed package version against "
                "the package version to be installed")

    current_version = SSHManager().execute_on_remote(
        ip=SSHManager().admin_ip, cmd=current_version_cmd)['stdout_str']

    new_version = SSHManager().execute_on_remote(
        ip=SSHManager().admin_ip, cmd=urlfile_version_cmd)['stdout_str']

    logger.info("Installed package version: {}".format(current_version))
    logger.info("Package version to be installed: {}".format(new_version))

    return current_version == new_version
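
# Usage sketch (hypothetical package name and path, mirroring how
# replace_rpm_package() calls this helper):
#
#     new_rpm = '/var/www/nailgun/fuel-agent/fuel-agent-10.0.0.noarch.rpm'
#     if not does_new_pkg_equal_to_installed_pkg(
#             installed_package='fuel-agent', new_package=new_rpm):
#         update_rpm(path=new_rpm)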
def create_and_attach_empty_volume(self):
    """Create an empty volume and attach it to an instance."""
    mount_point = '/dev/sdb'

    public_ip = self.fuel_web.get_public_vip(self.cluster_id)
    os_conn = OpenStackActions(public_ip)

    vol = os_conn.create_volume(availability_zone=self.cinder_az)
    image = os_conn.get_image(self.vmware_image)
    net = os_conn.get_network(self.net_name)
    sg = os_conn.get_security_group(self.sg_name)
    vm = os_conn.create_server(image=image,
                               availability_zone=self.vcenter_az,
                               security_groups=[sg],
                               net_id=net['id'],
                               timeout=210)
    floating_ip = os_conn.assign_floating_ip(vm)
    helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                 timeout_msg="Node {ip} is not accessible by SSH.".format(
                     ip=floating_ip.ip))

    logger.info("Attaching volume via cli")
    ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        self.cluster_id, ["controller"])
    cmd = '. openrc; nova volume-attach {srv_id} {volume_id} {mount}'.format(
        srv_id=vm.id, volume_id=vol.id, mount=mount_point)
    logger.debug('CMD: {}'.format(cmd))
    SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)

    helpers.wait(lambda: os_conn.get_volume_status(vol) == "in-use",
                 timeout=30,
                 timeout_msg="Volume doesn't reach 'in-use' state")

    vm.reboot()
    sleep(10)
    helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                 timeout_msg="Node {ip} is not accessible by SSH.".format(
                     ip=floating_ip.ip))

    controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
        self.cluster_id, ["controller"])[0]
    with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
        cmd = 'sudo /sbin/fdisk -l | grep {}'.format(mount_point)
        res = remote.execute_through_host(hostname=floating_ip.ip,
                                          cmd=cmd,
                                          auth=cirros_auth)
    logger.debug('OUTPUT: {}'.format(res['stdout_str']))
    assert_equal(res['exit_code'], 0, "Attached volume is not found")

    os_conn.delete_instance(vm)
    os_conn.verify_srv_deleted(vm)

    os_conn.delete_volume(vol)
def connect_admin_to_repo(environment, repo_name):
    repo_ip = environment.get_admin_node_ip()
    repo_port = '8080'
    repourl = 'http://{master_ip}:{repo_port}/{repo_name}/'.format(
        master_ip=repo_ip, repo_name=repo_name, repo_port=repo_port)

    cmds = [
        "yum-config-manager --add-repo {url}".format(url=repourl),
        "echo -e 'gpgcheck=0\npriority=20' >> "
        "/etc/yum.repos.d/{ip}_{port}_{repo}_.repo".format(
            ip=repo_ip, repo=repo_name, port=repo_port),
        "yum -y clean all",
        # FIXME(apanchenko):
        # Temporary disable this check in order to test packages update
        # inside Docker containers. When building of new images for containers
        # is implemented, we should check here that `yum check-update` returns
        # ONLY `100` exit code (updates are available for master node).
        "yum check-update; [[ $? -eq 100 || $? -eq 0 ]]"
    ]

    for cmd in cmds:
        SSHManager().execute_on_remote(ip=SSHManager().admin_ip, cmd=cmd)