def parse_centos_repo(repo_string, priority):
    # Validate RPM repository string format
    results = re.search("""
        ^                 # [beginning of the string]
        ([\w\-\.\/]+)     # group 1: repo name
        ,                 # [comma separator]
        (                 # group 2: uri;
        \w+:\/\/          #  - protocol, i.e. 'http://'
        [\w\-\.\/]+       #  - hostname
        (?::\d+)?         #  - optional port, i.e. ':8080'
        [\w\-\.\/]+       #  - rest of the path
        )                 #  - end of group 2
        \s*               # [space separator]
        ,?                # [optional comma separator]
        (\d+)?            # group 3: optional priority of the repository
        $                 # [ending of the string]""",
        repo_string.strip(), re.VERBOSE)
    if results:
        return {"name": results.group(1),
                "priority": int(results.group(3) or priority),
                "type": 'rpm',
                "uri": results.group(2)}
    else:
        logger.error("Provided RPM repository has incorrect format: {}"
                     .format(repo_string))
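# Illustrative usage sketch for parse_centos_repo (not part of the original module);
# the repo string, name and priority values below are made up for demonstration.
repo = parse_centos_repo('auxiliary,http://127.0.0.1:8080/centos/x86_64/,1050',
                         priority=1000)
# expected: {'name': 'auxiliary', 'priority': 1050, 'type': 'rpm',
#            'uri': 'http://127.0.0.1:8080/centos/x86_64/'}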
def wrapper(*args, **kwargs): result = func(*args, **kwargs) try: cluster_id = args[0].env.fuel_web.get_last_created_cluster() logger.info("start checking snapshot logs") controllers = \ args[0].env.fuel_web.get_nailgun_cluster_nodes_by_roles( cluster_id, ['controller']) computes = \ args[0].env.fuel_web.get_nailgun_cluster_nodes_by_roles( cluster_id, ['compute']) logger.debug("controller nodes are {}".format(controllers)) logger.debug("compute nodes are {}".format(computes)) controllers_fqdns = [controller['fqdn'] for controller in controllers] compute_fqdns = [compute['fqdn'] for compute in computes] logger.debug("controller fqdns are {}".format(controllers_fqdns)) logger.debug("compute fqdns are {}".format(compute_fqdns)) args[0].env.fuel_web.task_wait( args[0].env.fuel_web.client.generate_logs(), 60 * 10) snapshot_name = args[0].env.ssh_manager.execute_on_remote( args[0].env.ssh_manager.admin_ip, cmd="ls -I *.tar.xz /var/dump/")['stdout_str'] logger.debug("snapshot name is {}".format(snapshot_name)) check_snapshot_logs(args[0].env.ssh_manager.admin_ip, snapshot_name, controllers_fqdns, compute_fqdns) return result except Exception: logger.error(traceback.format_exc()) raise
def upload_manila_image(master_node_ip, image_dest_path=MANILA_IMAGE_DEST_PATH):
    """Copy Manila qcow2 image to the master node.

    :param master_node_ip: string, master node ip
    :param image_dest_path: string, destination path
    """
    logger.info(image_dest_path)
    try:
        logger.info("Start to upload manila image file")
        SSHManager().upload_to_remote(
            ip=master_node_ip,
            source=MANILA_IMAGE_PATH,
            target=image_dest_path
        )
        manila_image_name = MANILA_IMAGE_PATH.split('/')
        dest_path = '{0}/{1}'.format(
            image_dest_path,
            manila_image_name[-1])
        logger.info('File {} was uploaded on master'.format(dest_path))
        return dest_path
    except Exception:
        logger.error('Failed to upload file')
        logger.error(traceback.format_exc())
        return False
def parse_ubuntu_repo(repo_string, name, priority):
    # Validate DEB repository string format
    results = re.search("""
        ^                 # [beginning of the string]
        ([\w\-\.\/]+)?    # group 1: optional repository name (for Nailgun)
        ,?                # [optional comma separator]
        (deb|deb-src)     # group 2: type; search for 'deb' or 'deb-src'
        \s+               # [space separator]
        (                 # group 3: uri;
        \w+:\/\/          #  - protocol, i.e. 'http://'
        [\w\-\.\/]+       #  - hostname
        (?::\d+)?         #  - optional port, i.e. ':8080'
        [\w\-\.\/]+       #  - rest of the path
        )                 #  - end of group 3
        \s+               # [space separator]
        ([\w\-\.\/]+)     # group 4: suite;
        \s*               # [space separator], if exists
        (                 # group 5: section;
        [\w\-\.\/\s]*     #  - several space-separated names, or None
        )                 #  - end of group 5
        ,?                # [optional comma separator]
        (\d+)?            # group 6: optional priority of the repository
        $                 # [ending of the string]""",
        repo_string.strip(), re.VERBOSE)
    if results:
        return {"name": results.group(1) or name,
                "priority": int(results.group(6) or priority),
                "type": results.group(2),
                "uri": results.group(3),
                "suite": results.group(4),
                "section": results.group(5) or ''}
    else:
        logger.error("Provided DEB repository has incorrect format: {}"
                     .format(repo_string))
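# Illustrative usage sketch for parse_ubuntu_repo (not part of the original module);
# the repository line, fallback name and priority are made-up values.
repo = parse_ubuntu_repo(
    'deb http://127.0.0.1:8080/ubuntu/ trusty main restricted',
    name='auxiliary', priority=1050)
# expected: {'name': 'auxiliary', 'priority': 1050, 'type': 'deb',
#            'uri': 'http://127.0.0.1:8080/ubuntu/', 'suite': 'trusty',
#            'section': 'main restricted'}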
def _contain_public_ip(data, _used_networks):
    _has_public_ip = False
    _ip_regex = (r'\b((\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}'
                 r'(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\b')
    _not_public_regex = [
        r'\b10(\.\d{1,3}){3}',
        r'\b127(\.\d{1,3}){3}',
        r'\b169\.254(\.\d{1,3}){2}',
        r'172\.(1[6-9]|2[0-9]|3[0-1])(\.\d{1,3}){2}',
        r'192\.168(\.\d{1,3}){2}',
        r'2(2[4-9]|[3-5][0-9])(\.\d{1,3}){3}'
    ]
    for _match in re.finditer(_ip_regex, data):
        # If the IP address isn't public and doesn't belong to the networks
        # defined for deployment (e.g. admin, public, storage), then skip it
        if any(re.search(_r, _match.group()) for _r in _not_public_regex) \
                and not any(IPAddress(_match.group()) in IPNetwork(net)
                            for net in _used_networks):
            continue
        logger.debug('Usage statistics with public IP(s):\n {0}'.
                     format(data))
        logger.error('Found public IP in usage statistics: "{0}"'.format(
            _match.group()))
        _has_public_ip = True
    return _has_public_ip
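# Hypothetical check illustrating the intent of _contain_public_ip; the sample
# statistics text and network list are invented for this sketch (IPAddress and
# IPNetwork are assumed to come from netaddr, as in the function above).
sample_stats = 'nodes reported 10.0.0.5 and 8.8.8.8'
deployment_networks = ['192.168.0.0/24']
# 10.0.0.5 is private and outside the deployment pools, so it is skipped;
# 8.8.8.8 matches none of the private patterns, so the function returns True.
assert _contain_public_ip(sample_stats, deployment_networks) is True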
def _create_net_subnet(self, cluster):
    """Create net and subnet"""
    contrail_ip = self.fuel_web.get_public_vip(cluster)
    logger.info('The ip is %s', contrail_ip)
    net = Common(
        controller_ip=contrail_ip, user='******',
        password='******', tenant='admin'
    )

    net.neutron.create_network(body={
        'network': {
            'name': 'net04',
            'admin_state_up': True,
        }
    })

    network_id = ''
    network_dic = net.neutron.list_networks()
    for dd in network_dic['networks']:
        if dd.get("name") == "net04":
            network_id = dd.get("id")

    if network_id == "":
        logger.error('Network id is empty')

    logger.debug("Network 'net04' id is {0}".format(network_id))

    net.neutron.create_subnet(body={
        'subnet': {
            'network_id': network_id,
            'ip_version': 4,
            'cidr': '10.100.0.0/24',
            'name': 'subnet04',
        }
    })
def _compress_logs(_dirs, _archive_path): cmd = "tar --absolute-names --warning=no-file-changed -czf {t} {d}".format(t=_archive_path, d=" ".join(_dirs)) result = admin_remote.execute(cmd) if result["exit_code"] != 0: logger.error("Compressing of logs on master node failed: {0}".format(result)) return False return True
def wrapper(*args, **kwargs):
    result = func(*args, **kwargs)
    if settings.UPDATE_FUEL:
        logger.info("Updating Fuel packages from directory {0}."
                    .format(settings.UPDATE_FUEL_PATH))
        environment = get_current_env(args)
        if not environment:
            logger.warning("Decorator was triggered "
                           "from unexpected class.")
            return result

        centos_files_count, ubuntu_files_count = \
            environment.admin_actions.upload_packages(
                local_packages_dir=settings.UPDATE_FUEL_PATH,
                centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)
        if not centos_files_count and not ubuntu_files_count:
            raise ConfigurationException('Nothing to update: the number'
                                         ' of packages to update is 0')

        cluster_id = environment.fuel_web.get_last_created_cluster()

        if centos_files_count > 0:
            environment.docker_actions.execute_in_containers(
                cmd='yum -y install yum-plugin-priorities')

            # Update docker containers and restart them
            environment.docker_actions.execute_in_containers(
                cmd='yum clean expire-cache; yum update -y')
            environment.docker_actions.restart_containers()

            with environment.d_env.get_admin_remote() as remote:
                # Update packages on master node
                remote.execute(
                    'yum -y install yum-plugin-priorities;'
                    'yum clean expire-cache; yum update -y')

            # Add auxiliary repository to the cluster attributes
            if settings.OPENSTACK_RELEASE_UBUNTU not in \
                    settings.OPENSTACK_RELEASE:
                environment.fuel_web.add_local_centos_mirror(
                    cluster_id, path=settings.LOCAL_MIRROR_CENTOS,
                    priority=settings.AUX_RPM_REPO_PRIORITY)

        if ubuntu_files_count > 0:
            # Add auxiliary repository to the cluster attributes
            if settings.OPENSTACK_RELEASE_UBUNTU in \
                    settings.OPENSTACK_RELEASE:
                environment.fuel_web.add_local_ubuntu_mirror(
                    cluster_id, name="Auxiliary",
                    path=settings.LOCAL_MIRROR_UBUNTU,
                    priority=settings.AUX_DEB_REPO_PRIORITY)
            else:
                logger.error("{0} .DEB files uploaded but won't be used"
                             " because a different release is being"
                             " deployed!".format(ubuntu_files_count))
        if settings.SYNC_DEPL_TASKS:
            with environment.d_env.get_admin_remote() as remote:
                remote.execute("fuel release --sync-deployment-tasks"
                               " --dir /etc/puppet/")
    return result
def upload_tarball(node_ssh, tar_path, tar_target): try: logger.debug("Start to upload tar file") node_ssh.upload(tar_path, tar_target) except Exception: logger.error('Failed to upload file') logger.error(traceback.format_exc())
def regenerate_repo(self, regenerate_script, local_mirror_path):
    # Uploading scripts that prepare local repositories:
    # 'regenerate_centos_repo' and 'regenerate_ubuntu_repo'
    try:
        remote = self.env.get_admin_remote()
        remote.upload('{0}/{1}'.format(self.path_scripts,
                                       regenerate_script),
                      self.remote_path_scripts)
        remote.execute('chmod 755 {0}/{1}'.format(self.remote_path_scripts,
                                                  regenerate_script))
    except Exception:
        logger.error('Could not upload scripts for updating repositories.'
                     '\n{0}'.format(traceback.format_exc()))
        raise

    # Update the local repository using the previously uploaded script.
    script_cmd = 'REPO_PATH={0} {1}/{2}'.format(local_mirror_path,
                                                self.remote_path_scripts,
                                                regenerate_script)
    script_result = remote.execute(script_cmd)
    assert_equal(0, script_result['exit_code'],
                 self.assert_msg(script_cmd, script_result['stderr']))

    logger.info('Local "{0}" repository {1} has been updated successfully.'
                .format(settings.OPENSTACK_RELEASE, local_mirror_path))
def wrapper(*args, **kwagrs): try: return func(*args, **kwagrs) except SkipTest: raise SkipTest() except Exception: if args and 'snapshot' in args[0].__dict__: name = 'error_%s' % args[0].snapshot description = "Failed in method '%s'." % args[0].snapshot else: name = 'error_%s' % func.__name__ description = "Failed in method '%s'." % func.__name__ if args[0].env is not None: try: create_diagnostic_snapshot(args[0].env, "fail", name) except: logger.error(traceback.format_exc()) raise finally: logger.debug(args) args[0].env.make_snapshot(snapshot_name=name[-50:], description=description, is_make=True) raise
def show_step(self, step, details='', initialize=False): """Show a description of the step taken from docstring :param int/str step: step number to show :param str details: additional info for a step """ test_func_name = get_test_method_name() if initialize or step == 1: self.current_log_step = step else: self.current_log_step += 1 if self.current_log_step != step: error_message = 'The step {} should be {} at {}' error_message = error_message.format( step, self.current_log_step, test_func_name ) logger.error(error_message) test_func = getattr(self.__class__, test_func_name) docstring = test_func.__doc__ docstring = '\n'.join([s.strip() for s in docstring.split('\n')]) steps = {s.split('. ')[0]: s for s in docstring.split('\n') if s and s[0].isdigit()} if details: details_msg = ': {0} '.format(details) else: details_msg = '' if str(step) in steps: logger.info("\n" + " " * 55 + "<<< {0} {1}>>>" .format(steps[str(step)], details_msg)) else: logger.info("\n" + " " * 55 + "<<< {0}. (no step description " "in scenario) {1}>>>".format(str(step), details_msg))
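# Sketch of how show_step is normally driven: the current test method's docstring
# carries a numbered "Scenario" section, and each call advances and validates the
# step counter. The test below is hypothetical and only illustrates the expected
# docstring layout, not an existing test case.
def deploy_sample_cluster(self):
    """Deploy a sample cluster

    Scenario:
        1. Create cluster
        2. Deploy cluster
    """
    self.show_step(1, initialize=True)  # logs roughly "<<< 1. Create cluster >>>"
    self.show_step(2)                   # logs roughly "<<< 2. Deploy cluster >>>"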
def wrapper(*args, **kwargs): result = func(*args, **kwargs) try: if settings.UPLOAD_PATCHSET: if not settings.GERRIT_REFSPEC: raise ValueError('REFSPEC should be set for CI tests.') logger.info("Uploading new patchset from {0}" .format(settings.GERRIT_REFSPEC)) with args[0].environment.d_env.get_admin_remote() as remote: remote.upload(settings.PATCH_PATH.rstrip('/'), '/var/www/nailgun/fuel-ostf') remote.execute('dockerctl shell ostf ' 'bash -c "cd /var/www/nailgun/fuel-ostf; ' 'python setup.py develop"') remote.execute('dockerctl shell ostf ' 'bash -c "supervisorctl restart ostf"') helpers.wait( lambda: "0" in remote.execute('dockerctl shell ostf ' 'bash -c "pgrep [o]stf; echo $?"') ['stdout'][1], timeout=60) logger.info("OSTF status: RUNNING") except Exception as e: logger.error("Could not upload patch set {e}".format(e=e)) raise return result
def wrapper(*args, **kwargs): result = func(*args, **kwargs) try: if settings.UPLOAD_PATCHSET: if not settings.GERRIT_REFSPEC: raise ValueError('REFSPEC should be set for CI tests.') logger.info("Uploading new patchset from {0}" .format(settings.GERRIT_REFSPEC)) remote = SSHClient(args[0].admin_node_ip, username='******', password='******') remote.upload(settings.PATCH_PATH.rstrip('/'), '/tmp/fuel-ostf') remote.execute('source /opt/fuel_plugins/ostf/bin/activate; ' 'cd /tmp/fuel-ostf; python setup.py develop') remote.execute('/etc/init.d/supervisord restart') helpers.wait( lambda: "RUNNING" in remote.execute("supervisorctl status ostf | awk\ '{print $2}'")['stdout'][0], timeout=60) logger.info("OSTF status: RUNNING") except Exception as e: logger.error("Could not upload patch set {e}".format(e=e)) raise return result
def wrapper(*args, **kwargs): result = func(*args, **kwargs) if not settings.UPDATE_FUEL: return result try: environment = get_current_env(args) if not environment: logger.warning("Can't update packages: method of " "unexpected class is decorated.") return result if settings.UPDATE_FUEL_MIRROR: for url in settings.UPDATE_FUEL_MIRROR: repo_url = urlparse(url) cut_dirs = len(repo_url.path.strip('/').split('/')) download_cmd = ('wget --recursive --no-parent' ' --no-verbose --reject "index' '.html*,*.gif" --exclude-directories' ' "{pwd}/repocache" ' '--directory-prefix {path} -nH' ' --cut-dirs={cutd} {url}').\ format(pwd=repo_url.path.rstrip('/'), path=settings.UPDATE_FUEL_PATH, cutd=cut_dirs, url=repo_url.geturl()) return_code = call(download_cmd, shell=True) assert_equal(return_code, 0, 'Mirroring of remote' ' packages ' 'repository failed') centos_files_count, ubuntu_files_count = \ environment.admin_actions.upload_packages( local_packages_dir=settings.UPDATE_FUEL_PATH, centos_repo_path=settings.LOCAL_MIRROR_CENTOS, ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU) if centos_files_count == 0: return result # Add temporary repo with new packages to YUM configuration conf_file = '/etc/yum.repos.d/temporary.repo' cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/" "\ngpgcheck=0\npriority=1' > {1}").format( settings.LOCAL_MIRROR_CENTOS, conf_file) with environment.d_env.get_admin_remote() as remote: environment.execute_remote_cmd(remote, cmd, exit_code=0) update_command = 'yum clean expire-cache; yum update -y -d3' result = remote.execute(update_command) logger.debug('Result of "yum update" command on master node: ' '{0}'.format(result)) assert_equal(int(result['exit_code']), 0, 'Packages update failed, ' 'inspect logs for details') environment.execute_remote_cmd(remote, cmd='rm -f {0}' .format(conf_file), exit_code=0) except Exception: logger.error("Could not update packages") raise return result
def check_lbass_work(cls, os_conn):
    # create pool
    pool = os_conn.create_pool(pool_name='lbaas_pool')
    logger.debug('pool is {0}'.format(pool))

    # create vip
    vip = os_conn.create_vip(name='lbaas_vip',
                             protocol='HTTP',
                             port=80,
                             pool=pool)
    logger.debug('vip is {0}'.format(vip))

    # get list of vips
    lb_vip_list = os_conn.get_vips()
    logger.debug(
        'Initial state of vip is {0}'.format(
            os_conn.get_vip(lb_vip_list['vips'][0]['id'])))

    # wait for active status
    try:
        wait(lambda: os_conn.get_vip(
            lb_vip_list['vips'][0]['id'])['vip']['status'] == 'ACTIVE',
            timeout=120 * 60)
    except:
        logger.error(traceback.format_exc())
        vip_state = os_conn.get_vip(
            lb_vip_list['vips'][0]['id'])['vip']['status']
        asserts.assert_equal(
            'ACTIVE', vip_state,
            "Vip is not active, current state is {0}".format(vip_state))
def ha_mysql_termination(self): if not self.env.d_env.has_snapshot(self.snapshot_name): raise SkipTest() self.env.revert_snapshot(self.snapshot_name) for devops_node in self.env.d_env.nodes().slaves[:3]: with self.fuel_web.get_ssh_for_node(devops_node.name) as remote: logger.info('Terminating MySQL on {0}' .format(devops_node.name)) try: remote.check_call('pkill -9 -x "mysqld"') except: logger.error('MySQL on {0} is down after snapshot revert'. format(devops_node.name)) raise check_mysql(remote, devops_node.name) cluster_id = self.fuel_web.client.get_cluster_id( self.__class__.__name__) self.fuel_web.wait_mysql_galera_is_up(['slave-01', 'slave-02', 'slave-03'], timeout=300) self.fuel_web.run_ostf( cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
def wrapper(*args, **kwargs): logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]" .format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}" .format(''.join(func.__doc__))) try: result = func(*args, **kwargs) except SkipTest: raise SkipTest() except Exception: name = 'error_{:s}'.format(func.__name__) store_error_details(name, args[0].env) logger.error(traceback.format_exc()) logger.info("<" * 5 + "*" * 100 + ">" * 5) raise else: if settings.ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT: if args[0].env is None: logger.warning("Can't get diagnostic snapshot: " "unexpected class is decorated.") return result try: args[0].env.resume_environment() create_diagnostic_snapshot(args[0].env, "pass", func.__name__) except: logger.error("Fetching of diagnostic snapshot failed: {0}". format(traceback.format_exc())) return result
def replace_rpm_package(package):
    """Replace the package's rpm on the master node with the rpm from review"""
    ssh = SSHManager()
    logger.info("Patching {}".format(package))
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        # Upload package
        target_path = '/var/www/nailgun/{}/'.format(package)
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.UPDATE_FUEL_PATH.rstrip('/'),
            target=target_path)

        package_name = package
        package_ext = '*.noarch.rpm'
        pkg_path = os.path.join(target_path,
                                '{}{}'.format(package_name, package_ext))
        full_package_name = get_full_filename(wildcard_name=pkg_path)
        logger.debug('Package name is {0}'.format(full_package_name))
        full_package_path = os.path.join(os.path.dirname(pkg_path),
                                         full_package_name)

        # Update package on master node
        if not does_new_pkg_equal_to_installed_pkg(
                installed_package=package_name,
                new_package=full_package_path):
            update_rpm(path=full_package_path)

    except Exception:
        logger.error("Could not upload package")
        raise
def wrapper(*args, **kwargs): result = func(*args, **kwargs) try: if settings.UPLOAD_MANIFESTS: logger.info( "Uploading new manifests from " "{:s}".format(settings.UPLOAD_MANIFESTS_PATH)) environment = get_current_env(args) if not environment: logger.warning("Can't upload manifests: method of " "unexpected class is decorated.") return result with environment.d_env.get_admin_remote() as remote: remote.execute('rm -rf /etc/puppet/modules/*') remote.upload(settings.UPLOAD_MANIFESTS_PATH, '/etc/puppet/modules/') logger.info( "Copying new site.pp from " "{:s}".format(settings.SITEPP_FOR_UPLOAD)) remote.execute("cp %s /etc/puppet/manifests" % settings.SITEPP_FOR_UPLOAD) if settings.SYNC_DEPL_TASKS: remote.execute("fuel release --sync-deployment-tasks" " --dir /etc/puppet/") except Exception: logger.error("Could not upload manifests") raise return result
def replace_centos_bootstrap(environment):
    """Replace initramfs.img in /var/www/nailgun/ with one rebuilt
    with the review code

    environment - Environment Model object - self.env
    """
    logger.info("Updating bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("UPDATE_FUEL variable is not set")
    try:
        rebuilt_bootstrap = '/var/initramfs.img.updated'
        with environment.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(
                remote,
                '{0}'.format(rebuilt_bootstrap))
            logger.info("Assigning new bootstrap from {}"
                        .format(rebuilt_bootstrap))
            bootstrap = "/var/www/nailgun/bootstrap"
            cmd = ("mv {0}/initramfs.img /var/initramfs.img;"
                   "cp /var/initramfs.img.updated {0}/initramfs.img;"
                   "chmod +r {0}/initramfs.img;").format(bootstrap)
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         'Failed to assign bootstrap {}'.format(result))
        cmd = "cobbler sync"
        environment.base_actions.execute(cmd, exit_code=0)
    except Exception as e:
        logger.error("Could not update bootstrap {e}".format(e=e))
        raise
def store_astute_yaml_for_one_node(nailgun_node): ssh_manager = SSHManager() if 'roles' not in nailgun_node: return None errmsg = 'Downloading "{0}.yaml" from the {1} failed' msg = 'File "{0}.yaml" was downloaded from the {1}' nodename = nailgun_node['name'] ip = nailgun_node['ip'] for role in nailgun_node['roles']: filename = '{0}/{1}-{2}-{3}.yaml'.format(settings.LOGS_DIR, func_name, nodename, role) if not ssh_manager.isfile_on_remote(ip, '/etc/{0}.yaml'.format(role)): role = 'primary-' + role if ssh_manager.download_from_remote(ip, '/etc/{0}.yaml'.format(role), filename): logger.info(msg.format(role, nodename)) else: logger.error(errmsg.format(role, nodename)) if settings.DOWNLOAD_FACTS: fact_filename = re.sub(r'-\w*\.', '-facts.', filename) generate_facts(ip) if ssh_manager.download_from_remote(ip, '/tmp/facts.yaml', fact_filename): logger.info(msg.format('facts', nodename)) else: logger.error(errmsg.format('facts', nodename))
def execute_through_host(self, ssh, vm_host, cmd, creds=()): try: logger.info("Making intermediate transport") interm_transp = ssh._ssh.get_transport() logger.info("Opening channel to VM") interm_chan = interm_transp.open_channel('direct-tcpip', (vm_host, 22), (ssh.host, 0)) logger.info("Opening paramiko transport") transport = paramiko.Transport(interm_chan) logger.info("Starting client") transport.start_client() logger.info("Passing authentication to VM") if not creds: creds = ('cirros', 'cubswin:)') transport.auth_password(creds[0], creds[1]) logger.info("Opening session") channel = transport.open_session() logger.info("Executing command") channel.exec_command(cmd) logger.info("Getting exit status") output = channel.recv(1024) logger.info("Sending shutdown write signal") channel.shutdown_write() return output except Exception as exc: logger.error("An exception occurred: %s" % exc) return ''
def check_mysql(remote, node_name): check_cmd = 'pkill -0 -x mysqld' check_crm_cmd = ('crm resource status clone_p_mysql |' ' grep -q "is running on: $HOSTNAME"') check_galera_cmd = ("mysql --connect_timeout=5 -sse \"SELECT" " VARIABLE_VALUE FROM" " information_schema.GLOBAL_STATUS" " WHERE VARIABLE_NAME" " = 'wsrep_local_state_comment';\"") try: wait(lambda: remote.execute(check_cmd)['exit_code'] == 0, timeout=300) logger.info('MySQL daemon is started on {0}'.format(node_name)) except TimeoutError: logger.error('MySQL daemon is down on {0}'.format(node_name)) raise _wait(lambda: assert_equal(remote.execute(check_crm_cmd)['exit_code'], 0, 'MySQL resource is NOT running on {0}'.format( node_name)), timeout=60) try: wait(lambda: ''.join(remote.execute( check_galera_cmd)['stdout']).rstrip() == 'Synced', timeout=600) except TimeoutError: logger.error('galera status is {0}'.format(''.join(remote.execute( check_galera_cmd)['stdout']).rstrip())) raise
def get_fixture_relevance(self, actual_tasks, fixture): """Get fixture relevance between actual deployment tasks and tasks from fixture files :param actual_tasks: a list of actual tasks :param fixture: a dictionary with fixture data :return: a tuple of task sets """ actual_tasks = set(actual_tasks) fixture_tasks = set([i.keys()[0] for i in fixture["tasks"]]) tasks_description = self.env.admin_actions.get_tasks_description() extra_actual_tasks = actual_tasks.difference(fixture_tasks) extra_fixture_tasks = fixture_tasks.difference(actual_tasks) # NOTE: in ideal case we need to avoid tasks with wrong types wrong_types = {} for task in fixture["tasks"]: task_name, attrs = task.items()[0] expected_type = self.get_task_type(tasks_description, task_name) if not expected_type: logger.error("No type or no such task {!r}".format(task_name)) else: if expected_type != attrs["type"]: wrong_types.update({task_name: expected_type}) logger.info("Actual tasks {}contain extra tasks: {}" .format("" if extra_actual_tasks else "don't ", extra_actual_tasks)) logger.info("Fixture tasks {}contain extra tasks: {}" .format("" if extra_fixture_tasks else "don't ", extra_fixture_tasks)) return extra_actual_tasks, extra_fixture_tasks, wrong_types
def assert_all_tasks_completed(self, cluster_id=None): cluster_info_template = "\n\tCluster ID: {cluster}{info}\n" all_tasks = sorted(self.get_all_tasks_list(), key=lambda _tsk: _tsk["id"], reverse=True) not_ready_tasks, deploy_tasks = incomplete_tasks(all_tasks, cluster_id) not_ready_transactions = incomplete_deploy( {cluster: self.get_deployment_task_hist(task_id) for cluster, task_id in deploy_tasks.items()} ) if len(not_ready_tasks) > 0: task_details_template = ( "\n" "\t\tTask name: {name}\n" "\t\t\tStatus: {status}\n" "\t\t\tProgress: {progress}\n" "\t\t\tResult: {result}\n" "\t\t\tTask ID: {id}" ) task_text = "Not all tasks completed: {}".format( "".join( cluster_info_template.format( cluster=cluster, info="".join(task_details_template.format(**task) for task in tasks) ) for cluster, tasks in sorted(not_ready_tasks.items()) ) ) logger.error(task_text) if len(not_ready_transactions) == 0: # Else: we will raise assert with detailed info # about deployment assert_true(len(not_ready_tasks) == 0, task_text) fail_deploy(not_ready_transactions)
def __exit__(self, exc_type, exc_value, exc_tb): self.end_time = time.time() self.total_time = self.end_time - self.begin_time # Create a path where the 'self.total_time' will be stored. yaml_path = [] # There will be a list of one or two yaml subkeys: # - first key name is the method name of the test method_name = get_test_method_name() if method_name: yaml_path.append(method_name) # - second (subkey) name is provided from the decorator (the name of # the just executed function), or manually. yaml_path.append(self.name) try: update_yaml(yaml_path, '{:.2f}'.format(self.total_time), self.is_uniq) except Exception: logger.error("Error storing time statistic for {0}" " {1}".format(yaml_path, traceback.format_exc())) if not MASTER_IS_CENTOS7: raise
def regenerate_repo(self, regenerate_script, local_mirror_path): # Uploading scripts that prepare local repositories: # 'regenerate_centos_repo' and 'regenerate_ubuntu_repo' try: self.ssh_manager.upload_to_remote( ip=self.ip, source='{0}/{1}'.format(self.path_scripts, regenerate_script), target=self.remote_path_scripts ) self.ssh_manager.execute_on_remote( ip=self.ip, cmd='chmod 755 {0}/{1}'.format(self.remote_path_scripts, regenerate_script) ) except Exception: logger.error('Could not upload scripts for updating repositories.' '\n{0}'.format(traceback.format_exc())) raise # Update the local repository using previously uploaded script. script_cmd = '{0}/{1} {2} {3}'.format(self.remote_path_scripts, regenerate_script, local_mirror_path, self.ubuntu_release) script_result = self.ssh_manager.execute( ip=self.ip, cmd=script_cmd ) assert_equal(0, script_result['exit_code'], self.assert_msg(script_cmd, script_result['stderr'])) logger.info('Local repository {0} has been updated successfully.' .format(local_mirror_path))
def wrapper(*args, **kwagrs): try: return func(*args, **kwagrs) except SkipTest: raise SkipTest() except Exception as test_exception: exc_trace = sys.exc_traceback if args and 'snapshot' in args[0].__dict__: name = 'error_%s' % args[0].snapshot description = "Failed in method '%s'." % args[0].snapshot else: name = 'error_%s' % func.__name__ description = "Failed in method '%s'." % func.__name__ if args[0].env is not None: try: create_diagnostic_snapshot(args[0].env, "fail", name) except: logger.error("Fetching of diagnostic snapshot failed: {0}". format(traceback.format_exc())) try: admin_remote = args[0].env.get_admin_remote() pull_out_logs_via_ssh(admin_remote, name) except: logger.error("Fetching of raw logs failed: {0}". format(traceback.format_exc())) finally: logger.debug(args) args[0].env.make_snapshot(snapshot_name=name[-50:], description=description, is_make=True) raise test_exception, None, exc_trace
def _cluster_from_config(self, config):
    """Create cluster from predefined config."""

    slaves = len(config.get('nodes'))
    cluster_name = config.get('name', self._context.__name__)
    snapshot_name = "ready_cluster_{}".format(cluster_name)
    if self.check_run(snapshot_name):
        self.env.revert_snapshot(snapshot_name)
        cluster_id = self.fuel_web.client.get_cluster_id(cluster_name)
        self._context._storage['cluster_id'] = cluster_id
        logger.info("Got deployed cluster from snapshot")
        return True
    elif self.get_ready_slaves(slaves):
        logger.info("Create env {}".format(cluster_name))
        cluster_id = self.fuel_web.create_cluster(
            name=cluster_name,
            mode=config.get('mode', settings.DEPLOYMENT_MODE),
            settings=config.get('settings', {})
        )
        self._context._storage['cluster_id'] = cluster_id
        self.fuel_web.update_nodes(
            cluster_id,
            config.get('nodes')
        )
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.env.make_snapshot(snapshot_name, is_make=True)
        self.env.resume_environment()
        return True
    else:
        logger.error("Can't deploy cluster because snapshot"
                     " with bootstrapped nodes didn't revert")
        raise RuntimeError("Can't deploy cluster because snapshot"
                           " with bootstrapped nodes didn't revert")
def patch_and_assemble_ubuntu_bootstrap(environment):
    """Replace initramfs.img in /var/www/nailgun/ with one newly built
    from the review code

    environment - Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new ubuntu bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("UPDATE_FUEL variable is not set")
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.FUEL_AGENT_REPO_PATH.rstrip('/'),
                          pack_path)
            # renew code in bootstrap

            # Step 1 - install squashfs-tools
            cmd = "yum install -y squashfs-tools"
            result = remote.execute(cmd)
            assert_equal(
                result['exit_code'], 0,
                'Failed to install squashfs-tools {}'.format(result))

            # Step 2 - unpack bootstrap
            bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
            bootstrap_var = "/var/root.squashfs"

            cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
                bootstrap)
            result = remote.execute(cmd)
            assert_equal(
                result['exit_code'], 0,
                'Failed to unpack bootstrap {}'.format(result))

            # Step 3 - replace fuel-agent code in unpacked bootstrap
            agent_path = "/usr/lib/python2.7/dist-packages/fuel_agent"
            bootstrap_file = bootstrap + "/root.squashfs"
            cmd = ("rsync -r {2}fuel_agent/* {0}{1}/;"
                   "mv {3} /var/root.squashfs.old;").format(
                bootstrap_var, agent_path, pack_path, bootstrap_file)
            result = remote.execute(cmd)
            assert_equal(
                result['exit_code'], 0,
                'Failed to replace fuel-agent code {}'.format(result))

            # Step 4 - assemble new bootstrap
            compression = "-comp xz"
            no_progress_bar = "-no-progress"
            no_append = "-noappend"
            image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
                bootstrap_var,
                bootstrap_file,
                compression,
                no_progress_bar,
                no_append)
            result = remote.execute(image_rebuild)
            assert_equal(
                result['exit_code'], 0,
                'Failed to rebuild bootstrap {}'.format(result))
            checkers.check_file_exists(remote, '{0}'.format(bootstrap_file))
    except Exception as e:
        logger.error("Could not update the bootstrap: {e}".format(e=e))
        raise
def save_logs(url, filename): logger.info('Saving logs to "{}" file'.format(filename)) try: with open(filename, 'w') as f: f.write( urllib2.urlopen(url).read() ) except (urllib2.HTTPError, urllib2.URLError) as e: logger.error(e)
def wait_upgrade_is_done(node_ssh, timeout, phrase):
    logger.info('Waiting for the upgrade to finish')
    cmd = "grep '{0}' /var/log/fuel_upgrade.log".format(phrase)
    try:
        wait(lambda: not node_ssh.execute(cmd)['exit_code'], timeout=timeout)
    except Exception as e:
        result = node_ssh.execute(cmd)
        logger.error(e)
        assert_equal(0, result['exit_code'], result['stderr'])
def get_ceph_partitions(remote, device, type="xfs"):
    ret = remote.check_call("parted {device} print | grep {type}".format(
        device=device, type=type))['stdout']
    if not ret:
        logger.error("Partition not present! {partitions}: ".format(
            partitions=remote.check_call(
                "parted {device} print".format(device=device))))
        raise Exception()
    logger.debug("Partitions: {part}".format(part=ret))
    return ret
def erase_data_from_hdd(remote, device=None, mount_point=None, source="/dev/zero", block_size=512, blocks_from_start=2 * 1024 * 8, blocks_from_end=2 * 1024 * 8): """Erases data on "device" using "dd" utility. :param remote: devops.SSHClient, remote to node :param device: str, block device which should be corrupted. If none - drive mounted at "mount_point" will be used for erasing :param mount_point: str, mount point for auto-detecting drive for erasing :param source: str, block device or file that will be used as source for "dd", default - /dev/zero :param block_size: int, block size which will be pass to "dd" :param blocks_from_start: int, count of blocks which will be erased from the beginning of the hard drive. Default - 16,384 (with bs=512 - 8MB) :param blocks_from_end: int, count of blocks which will be erased from the end of the hard drive. Default - 16,384 (with bs=512 - 8MB) :raises Exception: if return code of any of commands is not 0 """ if not device: asserts.assert_is_not_none( mount_point, "Mount point is not defined, will do nothing") device = remote.execute( "awk '$2 == \"{mount_point}\" {{print $1}}' /proc/mounts".format( mount_point=mount_point))['stdout'][0] # get block device for partition try: device = re.findall(r"(/dev/[a-z]+)", device)[0] except IndexError: logger.error("Can not find any block device in output! " "Output is:'{}'".format(device)) commands = [] logger.debug("Boot sector of device '{}' will be erased".format(device)) if blocks_from_start > 0: commands.append("dd bs={block_size} if={source} of={device} " "count={blocks_from_start}".format( block_size=block_size, source=source, device=device, blocks_from_start=blocks_from_start)) if blocks_from_end > 0: commands.append( "dd bs={block_size} if={source} of={device} " "count={blocks_from_end} " "seek=$((`blockdev --getsz {device}` - {seek}))".format( block_size=block_size, source=source, device=device, blocks_from_end=blocks_from_end, seek=block_size * blocks_from_end)) commands.append("sync") for cmd in commands: run_on_remote(remote, cmd)
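# Hypothetical usage of erase_data_from_hdd: wipe the start and end of the drive
# mounted at /var/lib/ceph on a slave node ('slave-01' and the mount point are
# illustrative; get_ssh_for_node is used the same way elsewhere in this code).
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
    erase_data_from_hdd(remote, mount_point='/var/lib/ceph')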
def _upload_contrail_packages(self, remote):
    for pack in self._pack_path:
        if os.path.splitext(pack)[1] in [".deb", ".rpm"]:
            pkg_name = os.path.basename(pack)
            logger.debug("Uploading package {0} "
                         "to master node".format(pkg_name))
            remote.upload(pack, self._pack_copy_path)
        else:
            logger.error('Failed to upload file {0}: unsupported '
                         'package extension'.format(pack))
def setup_customisation(self): self.wait_for_provisioning() try: cmd = "pkill -sigusr1 -f '^.*/fuelmenu$'" with self.d_env.get_admin_remote() as remote: wait(lambda: remote.execute(cmd)['exit_code'] == 0, timeout=60) except Exception: logger.error("Could not kill process of fuelmenu") raise
def execute_on_remote(self, ip, cmd, port=22, err_msg=None, jsonify=False, assert_ec_equal=None, raise_on_assert=True): """Execute ``cmd`` on ``remote`` and return result. :param ip: ip of host :param port: ssh port :param cmd: command to execute on remote host :param err_msg: custom error message :param assert_ec_equal: list of expected exit_code :param raise_on_assert: Boolean :return: dict :raise: Exception """ if assert_ec_equal is None: assert_ec_equal = [0] result = self.execute(ip=ip, port=port, cmd=cmd) if result['exit_code'] not in assert_ec_equal: error_details = { 'command': cmd, 'host': ip, 'stdout': result['stdout'], 'stderr': result['stderr'], 'exit_code': result['exit_code'] } error_msg = (err_msg or "Unexpected exit_code returned:" " actual {0}, expected {1}.".format( error_details['exit_code'], ' '.join( map(str, assert_ec_equal)))) log_msg = ("{0} Command: '{1}' " "Details: {2}".format(error_msg, cmd, error_details)) logger.error(log_msg) if raise_on_assert: raise Exception(log_msg) result['stdout_str'] = ''.join(result['stdout']) result['stdout_len'] = len(result['stdout']) result['stderr_str'] = ''.join(result['stderr']) result['stderr_len'] = len(result['stderr']) if jsonify: try: result['stdout_json'] = \ self._json_deserialize(result['stdout_str']) except Exception: error_msg = ("Unable to deserialize output of command" " '{0}' on host {1}".format(cmd, ip)) logger.error(error_msg) raise Exception(error_msg) return result
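# Minimal usage sketch for execute_on_remote (assuming it is a method of SSHManager,
# which exposes admin_ip as used elsewhere in this code); the command is illustrative.
ssh = SSHManager()
kernel = ssh.execute_on_remote(ip=ssh.admin_ip, cmd='uname -r')['stdout_str']
# a non-zero exit code would raise here unless raise_on_assert=False is passed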
def get_mongo_partitions(remote, device):
    ret = remote.check_call("lsblk | grep {device} | awk {size}".format(
        device=device,
        size=re.escape('{print $4}')))['stdout']
    if not ret:
        logger.error("Partition not present! {partitions}: ".format(
            partitions=remote.check_call(
                "parted {device} print".format(device=device))))
        raise Exception()
    logger.debug("Partitions: {part}".format(part=ret))
    return ret
def _compress_logs(_dirs, _archive_path): cmd = 'tar --absolute-names --warning=no-file-changed -czf {t} {d}'.\ format(t=_archive_path, d=' '.join(_dirs)) result = admin_remote.execute(cmd) if result['exit_code'] != 0: logger.error("Compressing of logs on master node failed: {0}". format(result)) return False return True
def ip_address_show(self, node_name, namespace, interface, pipe_str=''): try: remote = self.get_ssh_for_node(node_name) ret = remote.check_call( 'ip netns exec {0} ip address show {1} {2}'.format( namespace, interface, pipe_str)) return ' '.join(ret['stdout']) except DevopsCalledProcessError as err: logger.error(err.message) return ''
def setup_customisation(self):
    self.wait_for_provisioning()
    try:
        remote = self.get_admin_remote()
        pid = remote.execute("pgrep 'fuelmenu'")['stdout'][0]
        pid = pid.rstrip('\n')
        remote.execute("kill -sigusr1 {0}".format(pid))
    except Exception:
        logger.error("Could not kill the fuelmenu process")
        raise
def wrapper(*args, **kwargs): result = func(*args, **kwargs) if settings.UPDATE_FUEL: logger.info("Update fuel's packages from directory {0}.".format( settings.UPDATE_FUEL_PATH)) environment = get_current_env(args) if not environment: logger.warning("Decorator was triggered " "from unexpected class.") return result centos_files_count, ubuntu_files_count = \ environment.admin_actions.upload_packages( local_packages_dir=settings.UPDATE_FUEL_PATH, centos_repo_path=settings.LOCAL_MIRROR_CENTOS, ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU) if not centos_files_count and not ubuntu_files_count: raise ConfigurationException('Nothing to update,' ' packages to update values is 0') cluster_id = environment.fuel_web.get_last_created_cluster() if centos_files_count > 0: with environment.d_env.get_admin_remote() as remote: # Update packages on master node remote.execute('yum -y install yum-plugin-priorities;' 'yum clean expire-cache; yum update -y ' '2>>/var/log/yum-update-error.log') # Add auxiliary repository to the cluster attributes if settings.OPENSTACK_RELEASE_UBUNTU not in \ settings.OPENSTACK_RELEASE: environment.fuel_web.add_local_centos_mirror( cluster_id, path=settings.LOCAL_MIRROR_CENTOS, priority=settings.AUX_RPM_REPO_PRIORITY) if ubuntu_files_count > 0: # Add auxiliary repository to the cluster attributes if settings.OPENSTACK_RELEASE_UBUNTU in \ settings.OPENSTACK_RELEASE: environment.fuel_web.add_local_ubuntu_mirror( cluster_id, name="Auxiliary", path=settings.LOCAL_MIRROR_UBUNTU, priority=settings.AUX_DEB_REPO_PRIORITY) else: logger.error("{0} .DEB files uploaded but won't be used" " because of deploying wrong release!".format( ubuntu_files_count)) if settings.SYNC_DEPL_TASKS: with environment.d_env.get_admin_remote() as remote: remote.execute("fuel release --sync-deployment-tasks" " --dir /etc/puppet/") return result
def get_package_versions_from_node(remote, name, os_type): if os_type and 'Ubuntu' in os_type: cmd = "dpkg-query -W -f='${Version}' %s" % name else: cmd = "rpm -q {0}".format(name) try: result = ''.join(remote.execute(cmd)['stdout']) return result.strip() except Exception: logger.error(traceback.format_exc()) raise
def find_backup(remote): try: arch_dir = ''.join( remote.execute("ls -1u /var/backup/fuel/ | sed -n 1p")['stdout']) arch_path = ''.join( remote.execute("ls -1u /var/backup/fuel/{0}/*.lrz".format( arch_dir.strip()))["stdout"]) return arch_path except Exception as e: logger.error('exception is {0}'.format(e)) raise e
def patch_centos_bootstrap(environment):
    """Replace initramfs.img in /var/www/nailgun/ with one newly built
    from the review code

    environment - Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("UPDATE_FUEL variable is not set")
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'), pack_path)
            # renew code in bootstrap

            # Step 1 - unpack bootstrap
            bootstrap_var = "/var/initramfs"
            bootstrap = "/var/www/nailgun/bootstrap"
            cmd = ("mkdir {0};"
                   "cp /{1}/initramfs.img {0}/;"
                   "cd {0};"
                   "cat initramfs.img | gunzip | cpio -imudv;").format(
                bootstrap_var,
                bootstrap
            )
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         'Failed to unpack bootstrap {}'.format(result))

            # Step 2 - replace fuel-agent code in unpacked bootstrap
            agent_path = "/usr/lib/python2.7/site-packages/fuel_agent"
            image_rebuild = "{} | {} | {}".format(
                "find . -xdev",
                "cpio --create --format='newc'",
                "gzip -9 > /var/initramfs.img.updated")

            cmd = ("rm -rf {0}/initramfs.img;"
                   "rsync -r {2}fuel-agent/fuel_agent/* {0}{1}/;"
                   "cd {0}/;"
                   "{3};").format(
                bootstrap_var,
                agent_path,
                pack_path,
                image_rebuild)

            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         'Failed to rebuild bootstrap {}'.format(result))
    except Exception as e:
        logger.error("Could not update the bootstrap: {e}".format(e=e))
        raise
def upload_tarball(node_ssh, tar_path, tar_target): assert_true(tar_path, "Source path for uploading 'tar_path' is empty, " "please check test settings!") check_archive_type(tar_path) try: logger.info("Start to upload tar file") node_ssh.upload(tar_path, tar_target) logger.info('File {} was uploaded on master'.format(tar_path)) except Exception: logger.error('Failed to upload file') logger.error(traceback.format_exc())
def update_rpm(env, path, rpm_cmd='/bin/rpm -Uvh --force'):
    cmd = '{rpm_cmd} {rpm_path}'.format(rpm_cmd=rpm_cmd, rpm_path=path)
    logger.info("Updating rpm '{0}'".format(path))
    try:
        env.base_actions.execute(cmd, exit_code=0)
        logger.info("Rpm '{0}' has been updated successfully".format(path))
    except Exception as ex:
        logger.error("Could not update rpm '{0}': {1}".format(path, ex))
        raise
def update_rpm(path, rpm_cmd='/bin/rpm -Uvh --force'):
    cmd = '{rpm_cmd} {rpm_path}'.format(rpm_cmd=rpm_cmd, rpm_path=path)
    logger.info("Updating rpm '{0}'".format(path))
    try:
        SSHManager().execute(SSHManager().admin_ip, cmd)
        logger.info("Rpm '{0}' has been updated successfully".format(path))
    except Exception as ex:
        logger.error("Could not update rpm '{0}': {1}".format(path, ex))
        raise
def get_package_versions_from_node(ip, name, os_type): # Moved from checkers.py for improvement of code if os_type and 'Ubuntu' in os_type: cmd = "dpkg-query -W -f='${Version}' %s" % name else: cmd = "rpm -q {0}".format(name) try: result = ''.join(SSHManager().execute(ip, cmd)['stdout']) return result.strip() except Exception: logger.error(traceback.format_exc()) raise
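# Illustrative call for get_package_versions_from_node; the node ip and package
# name are placeholders, and os_type is expected to contain 'Ubuntu' for deb-based
# nodes (otherwise the rpm query branch is used).
version = get_package_versions_from_node(ip='10.109.0.2',
                                          name='fuel-agent',
                                          os_type='Ubuntu 14.04')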
def load_config_from_file(path_to_conf=None):
    if not path_to_conf:
        logger.error("Please, specify file to load config from")
        raise SkipTest("File with config is not specified. "
                       "Aborting the test")
    with open(path_to_conf, 'r') as f:
        try:
            config = yaml.load(f)
            return config
        except yaml.YAMLError:
            logger.error("Check config file for consistency")
            raise
def call_cmd(cmd): """Call wrapper for command.""" try: res = subprocess.check_output( cmd, shell=True, stderr=subprocess.STDOUT).replace('\n', '') except subprocess.CalledProcessError as e: logger.error("Cmd ret code: {0}. Output: {1}".format( e.returncode, e.output)) return e.output return res
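# Simple usage sketch for call_cmd (the command is illustrative): on success the
# newline-stripped stdout is returned; on a non-zero exit code the error output is
# returned instead of raising, so callers should check the result themselves.
active_domains = call_cmd('virsh list --name')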
def check_distribution(): """Checks whether distribution is supported. :return: None :raise: Exception """ if settings.OPENSTACK_RELEASE not in (settings.OPENSTACK_RELEASE_CENTOS, settings.OPENSTACK_RELEASE_UBUNTU): error_msg = ("{0} distribution is not supported!".format( settings.OPENSTACK_RELEASE)) logger.error(error_msg) raise Exception(error_msg)
def wrapper(*args, **kwargs): logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]".format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}".format(''.join(func.__doc__))) try: result = func(*args, **kwargs) except SkipTest: raise SkipTest() except Exception as test_exception: exc_trace = sys.exc_traceback name = 'error_%s' % func.__name__ description = "Failed in method '%s'." % func.__name__ if args[0].env is not None: try: create_diagnostic_snapshot(args[0].env, "fail", name) except: logger.error( "Fetching of diagnostic snapshot failed: {0}".format( traceback.format_exc())) try: with args[0].env.d_env.get_admin_remote()\ as admin_remote: pull_out_logs_via_ssh(admin_remote, name) except: logger.error("Fetching of raw logs failed: {0}".format( traceback.format_exc())) finally: logger.debug(args) try: args[0].env.make_snapshot(snapshot_name=name[-50:], description=description, is_make=True) except: logger.error("Error making the environment snapshot:" " {0}".format(traceback.format_exc())) logger.error(traceback.format_exc()) logger.info("<" * 5 + "*" * 100 + ">" * 5) raise test_exception, None, exc_trace else: if settings.ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT: if args[0].env is None: logger.warning("Can't get diagnostic snapshot: " "unexpected class is decorated.") return result try: args[0].env.resume_environment() create_diagnostic_snapshot(args[0].env, "pass", func.__name__) except: logger.error( "Fetching of diagnostic snapshot failed: {0}".format( traceback.format_exc())) return result
def replace_fuel_agent_rpm(environment):
    """Replace fuel_agent*.rpm in MCollective with fuel_agent*.rpm
    from review

    environment - Environment Model object - self.env
    """
    logger.info("Patching fuel-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        full_pack_path = os.path.join(pack_path, '*.rpm')
        container = 'mcollective'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'), pack_path)

        # Update fuel-agent in MCollective
        cmd = "rpm -q fuel-agent"
        old_package = \
            environment.base_actions.execute_in_container(
                cmd, container, exit_code=0)
        cmd = "rpm -qp {0}".format(full_pack_path)
        new_package = \
            environment.base_actions.execute_in_container(
                cmd, container)
        logger.info("Updating package {0} with {1}"
                    .format(old_package, new_package))

        cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)

        cmd = "rpm -q fuel-agent"
        installed_package = \
            environment.base_actions.execute_in_container(
                cmd, container, exit_code=0)

        assert_equal(installed_package, new_package,
                     "The new package {0} was not installed".format(
                         new_package))

        # Update fuel-agent on master node
        with environment.d_env.get_admin_remote() as remote:
            cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         'Failed to update package {}'.format(result))

    except Exception as e:
        logger.error("Could not replace fuel-agent package: {e}".format(e=e))
        raise
def get_ceph_partitions(ip, device, fs_type="xfs"):
    # Moved from checkers.py for improvement of code
    ret = SSHManager().check_call(
        ip=ip,
        cmd="parted {device} print | grep {type}".format(
            device=device, type=fs_type))['stdout']
    if not ret:
        logger.error("Partition not present! {partitions}: ".format(
            partitions=SSHManager().check_call(
                ip=ip,
                cmd="parted {device} print".format(device=device))))
        raise Exception()
    logger.debug("Partitions: {part}".format(part=ret))
    return ret
def get_mongo_partitions(ip, device):
    # Moved from checkers.py for improvement of code
    ret = SSHManager().check_call(
        ip=ip,
        cmd="lsblk | grep {device} | awk {size}".format(
            device=device,
            size=re.escape('{print $4}')))['stdout']
    if not ret:
        logger.error("Partition not present! {partitions}: ".format(
            partitions=SSHManager().check_call(
                ip=ip,
                cmd="parted {device} print".format(device=device))))
        raise Exception()
    logger.debug("Partitions: {part}".format(part=ret))
    return ret
def make_snapshot(self, snapshot_name, description="", is_make=False):
    if settings.MAKE_SNAPSHOT or is_make:
        self.get_virtual_environment().suspend(verbose=False)
        self.get_virtual_environment().snapshot(snapshot_name, force=True)
        revert_info(snapshot_name, description)
    if self.__wrapped__ == 'check_fuel_statistics':
        self.get_virtual_environment().resume()
        try:
            self.nodes().admin.await(self.admin_net, timeout=60)
        except Exception:
            logger.error('Admin node is unavailable via SSH after '
                         'environment resume')
            raise
def get_current_env(args): if args[0].__class__.__name__ == "EnvironmentModel": return args[0] elif args[0].__class__.__name__ == "FuelWebClient": return args[0].environment else: try: return args[0].env except AttributeError as attr_err: logger.error( "Class '{0}' doesn't have 'env' attribute! {1}".format( args[0].__class__.__name__, attr_err.message)) raise
def store_astute_yaml(env, func_name): for node in env.nodes().slaves: nailgun_node = env.fuel_web.get_nailgun_node_by_devops_node(node) if node.driver.node_active(node) and nailgun_node['roles']: try: remote = env.get_ssh_to_remote_by_name(node.name) filename = '{0}/{1}-{2}.yaml'.format(settings.LOGS_DIR, func_name, node.name) logger.info("Storing {0}".format(filename)) if not remote.download('/etc/astute.yaml', filename): logger.error("Downloading 'astute.yaml' from the node " "{0} failed.".format(node.name)) except Exception: logger.error(traceback.format_exc())