def stop(self, force=False, master=True, slave=True):
    """Stop cluster

    :param force: Force the cluster to shut down
    :param master: If exclude master cluster, set False
    :param slave: If exclude slave cluster, set False
    """
    # Fire may pass non-boolean values for flags; validate each one.
    # (De-duplicated: the original repeated this check three times.)
    for name, value in (('force', force), ('master', master),
                        ('slave', slave)):
        if not isinstance(value, bool):
            msg = message.get('error_option_type_not_boolean')
            msg = msg.format(option=name)
            logger.error(msg)
            return
    center = Center()
    center.update_ip_port()
    success = center.check_hosts_connection()
    if not success:
        return
    center.stop_redis(force, master=master, slave=slave)
def failover(self):
    """Replace disconnected master with slave

    If disconnected master comes back to live, it become slave.
    """
    center = Center()
    center.update_ip_port()
    master_obj_list = center.get_master_obj_list()
    # Template reported when a dead master has no connected slave; it is
    # formatted with the master's address at the point of failure below.
    msg = color.yellow(message.get('error_no_alive_slave_for_failover'))
    all_alive = True
    for node in master_obj_list:
        if node['status'] != 'connected':
            all_alive = False
            success = False
            # Try each slave in turn until one takes over successfully.
            for slave in node['slaves']:
                if slave['status'] == 'connected':
                    msg2 = message.get('redis_failover').format(
                        slave_addr=slave['addr'],
                        master_addr=node['addr']
                    )
                    logger.info(msg2)
                    stdout = center.run_failover(
                        slave['addr'],
                        take_over=True
                    )
                    # Anything other than 'OK' means this slave could not
                    # take over; move on to the next candidate.
                    if stdout != 'OK':
                        continue
                    logger.info('OK')
                    success = True
                    break
            if not success:
                logger.info(msg.format(node['addr']))
    if all_alive:
        # Nothing was disconnected, so there was nothing to fail over.
        msg = message.get('already_all_master_alive')
        logger.info(msg)
def ping(host=None, port=None, all=False):
    """Send ping command

    :param all: If true, send command to all
    :param host: host info for redis
    :param port: port info for redis
    """
    if not isinstance(all, bool):
        msg = message.get('error_option_type_not_boolean').format(option='all')
        logger.error(msg)
        return
    if not all and (not host or not port):
        logger.error(message.get('use_host_port_or_option_all'))
        return
    if all:
        # Broadcast PING and tabulate only the failures.
        failed_rows = []
        results = RedisCliUtil.command_all_async('ping 2>&1')
        alive = 0
        for node_type, node_host, node_port, result, _ in results:
            addr = '{}:{}'.format(node_host, node_port)
            if result == 'OK':
                alive += 1
            else:
                failed_rows.append([node_type, addr, color.red('FAIL')])
        if failed_rows:
            utils.print_table([['TYPE', 'ADDR', 'RESULT']] + failed_rows)
        msg = message.get('counting_alive_redis')
        logger.info(msg.format(alive=alive, total=len(results)))
        return
    if host and port:
        _command('ping', False, host, port)
def set(self, key, value, all=False, save=False, host=None, port=None):
    """Command: redis-cli config set

    :param key: target key
    :param value: value to set
    :param save: If true, save value to config file
    :param all: If true, send command to all redis
    :param host: host info for redis
    :param port: port info for redis
    """
    # bugfix: the 'error_option_type_not_boolean' template is formatted
    # with the key 'option' everywhere else in this file; formatting it
    # with 'options=' raised KeyError instead of printing the error.
    if not isinstance(all, bool):
        msg = m.get('error_option_type_not_boolean')
        msg = msg.format(option='all')
        logger.error(msg)
        return
    if not isinstance(save, bool):
        msg = m.get('error_option_type_not_boolean')
        msg = msg.format(option='save')
        logger.error(msg)
        return
    if (not host or not port) and not all:
        msg = m.get('use_host_port_or_option_all')
        logger.error(msg)
        return
    sub_cmd = 'config set {key} {value} 2>&1'.format(key=key, value=value)
    if all:
        meta = []
        ret = RedisCliUtil.command_all_async(sub_cmd)
        ok_cnt = 0
        # loop var renamed from 'message' to avoid shadowing the module
        for m_s, host, port, result, reply in ret:
            addr = '{}:{}'.format(host, port)
            if result == 'OK':
                if utils.to_str(reply) == 'OK':
                    ok_cnt += 1
                else:
                    meta.append([m_s, addr, color.red(reply)])
            else:
                meta.append([m_s, addr, color.red('FAIL')])
        if meta:
            utils.print_table([['TYPE', 'ADDR', 'RESULT']] + meta)
        logger.info('success {}/{}'.format(ok_cnt, len(ret)))
    else:
        output = RedisCliUtil.command(sub_cmd=sub_cmd, host=host, port=port,
                                      formatter=self.no_print)
        output = output.strip()
        if output == "OK":
            logger.info(output)
        else:
            logger.error(output)
    if save:
        # Persist to the template and re-sync the cluster configuration.
        RedisCliUtil.save_redis_template_config(key, value)
        center = Center()
        center.update_ip_port()
        success = center.check_hosts_connection()
        if not success:
            return
        center.configure_redis()
        center.sync_conf()
def _handle(text):
    """Run one interactive command line through Fire.

    :param text: raw command line typed by the user
    :return: True if the command errored, False on success,
        None for no-op inputs (empty string / 'clear')
    """
    if text == '':
        return
    if text == 'clear':
        utils.clear_screen()
        return
    # Normalize help syntax: both '--help' and '-- --help' become the
    # canonical '-- --help' form that Fire expects.
    text = text.replace('-- --help', '?')
    text = text.replace('--help', '?')
    text = text.replace('?', '-- --help')
    err_flg = True
    try:
        fire.Fire(component=Command, command=text)
        err_flg = False
    # KeyboardInterrupt and EOFError both mean "user cancelled input".
    except (KeyboardInterrupt, EOFError):
        msg = message.get('cancel_command_input')
        logger.warning('\b\b' + msg)
    except KeyError as ex:
        # fix: logger.warn is deprecated in favor of logger.warning
        logger.warning('[%s] command fail' % text)
        logger.exception(ex)
    except TypeError as ex:
        logger.exception(ex)
    except IOError as ex:
        if ex.errno == 2:
            # ENOENT: report the missing file by name
            msg = message.get('error_file_not_exist').format(file=ex.filename)
            logger.error(msg)
        else:
            logger.exception(ex)
    except utils.CommandError as ex:
        logger.exception(ex)
    # Fire already printed its own diagnostics for these.
    except (FireError, FireExit):
        pass
    except (
        HostNameError,
        HostConnectionError,
        SSHConnectionError,
        FileNotExistError,
        YamlSyntaxError,
        PropsSyntaxError,
        PropsKeyError,
        PropsError,
        SSHCommandError,
        ClusterRedisError,
        ClusterNotExistError,
        ClusterIdError,
        EnvError,
    ) as ex:
        logger.error('{}: {}'.format(ex.class_name(), str(ex)))
    except LightningDBError as ex:
        logger.error('[ErrorCode {}] {}'.format(ex.error_code, str(ex)))
    except BaseException as ex:
        logger.exception(ex)
    finally:
        # NOTE: returning from finally deliberately swallows anything not
        # caught above so the REPL loop keeps running.
        return err_flg
def start(self, profile=False, master=True, slave=True):
    """Start cluster

    :param profile: If true, start redis processes with profiling enabled
    :param master: If exclude master cluster, set False
    :param slave: If exclude slave cluster, set False
    """
    logger.debug("command 'cluster start'")
    # De-duplicated boolean validation (was repeated three times).
    for name, flag in (('profile', profile), ('master', master),
                       ('slave', slave)):
        if not isinstance(flag, bool):
            msg = message.get('error_option_type_not_boolean')
            msg = msg.format(option=name)
            logger.error(msg)
            return
    center = Center()
    center.update_ip_port()
    success = center.check_hosts_connection()
    if not success:
        return
    center.ensure_cluster_exist()
    if master:
        # Refuse to start if master ports are held by another owner.
        master_alive_count = center.get_alive_master_redis_count()
        master_alive_count_mine = center.get_alive_master_redis_count(
            check_owner=True
        )
        not_mine_count = master_alive_count - master_alive_count_mine
        if not_mine_count > 0:
            msg = message.get('error_cluster_start_master_collision')
            msg = '\n'.join(msg).format(count=not_mine_count)
            raise LightningDBError(11, msg)
    if slave:
        # Same collision check for slave ports.
        slave_alive_count = center.get_alive_slave_redis_count()
        slave_alive_count_mine = center.get_alive_slave_redis_count(
            check_owner=True
        )
        not_mine_count = slave_alive_count - slave_alive_count_mine
        if not_mine_count > 0:
            msg = message.get('error_cluster_start_slave_collision')
            msg = '\n'.join(msg).format(count=not_mine_count)
            raise LightningDBError(12, msg)
    center.backup_server_logs(master=master, slave=slave)
    center.create_redis_data_directory()
    # equal to cluster.configure()
    center.configure_redis()
    center.sync_conf(show_result=True)
    center.start_redis_process(profile, master=master, slave=slave)
    center.wait_until_all_redis_process_up(master=master, slave=slave)
def base_directory(default='~/tsr2'):
    """Ask the user for the base directory and persist it to CLI config.

    Re-asks until an absolute or home-relative path is given.

    :param default: default path shown in the prompt
    :return: the accepted base directory
    """
    logger.debug('ask base directory')
    result = ask(message.get('ask_base_directory'), default=default)
    if not result.startswith(('~', '/')):
        logger.error(message.get('error_invalid_path').format(value=result))
        # bugfix: keep the caller-supplied default on retry (the original
        # recursed with no argument, silently resetting it to '~/tsr2')
        return base_directory(default)
    logger.info('OK, {}'.format(result))
    cli_config = config.get_cli_config()
    cli_config['base_directory'] = result
    config.save_cli_config(cli_config)
    return result
def ssd_count(save, default=None):
    """Ask how many SSDs to use; re-asks until the answer is positive.

    :param save: persist the answer to deploy history when true
    :param default: default shown in the prompt (falls back to history)
    :return: the accepted SSD count (int)
    """
    logger.debug('ask ssd count')
    deploy_history = config.get_deploy_history()
    default = default or deploy_history['ssd_count']
    answer = int(askInt(message.get('ask_ssd_count'), default=str(default)))
    if answer <= 0:
        logger.error(message.get('error_ssd_count_less_than_1'))
        return ssd_count(save=save, default=default)
    if save:
        deploy_history['ssd_count'] = answer
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(answer))
    return answer
def replicas(save, default=None):
    """Ask the replica count; re-asks until the answer is non-negative.

    :param save: persist the answer to deploy history when true
    :param default: default shown in the prompt (falls back to history)
    :return: the accepted replica count (int)
    """
    logger.debug('ask replicas')
    deploy_history = config.get_deploy_history()
    default = default or deploy_history['replicas']
    answer = int(askInt(message.get('ask_replicas'), default=str(default)))
    if answer < 0:
        logger.error(message.get('error_replicas_less_than_0'))
        return replicas(save, default=default)
    if save:
        deploy_history['replicas'] = answer
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(answer))
    return answer
def beeline(self, **kargs):
    """Connect to thriftserver command line
    """
    logger.debug('thriftserver_command_beeline')
    _check_spark()
    cluster_id = config.get_cur_cluster_id()
    path_of_fb = config.get_path_of_fb(cluster_id)
    ths_props_path = path_of_fb['thrift_properties']

    def _exported(name):
        # Read an env var exported by the thrift properties file.
        cmd = 'source {}; echo ${}'.format(ths_props_path, name)
        return sp.check_output(cmd, shell=True).decode('utf-8').strip()

    host = _exported('HIVE_HOST')
    port = _exported('HIVE_PORT')
    spark_env = _get_env()
    base_cmd = '{}/beeline'.format(spark_env['spark_bin'])
    # Defaults first; user-supplied kwargs override them.
    options = {
        'u': 'jdbc:hive2://{}:{}'.format(host, port),
        'n': os.environ['USER'],
    }
    options.update(kargs)
    for opt, val in options.items():
        base_cmd += ' -{} {}'.format(opt, val)
    logger.debug(base_cmd)
    logger.info(message.get('try_connection'))
    os.system(base_cmd)
def host_for_monitor(host_list):
    """Ask the user which host to monitor and return the chosen host.

    :param host_list: candidate host names, presented as a numbered list
    :return: the selected host
    """
    numbered = [' ({}) {}'.format(idx + 1, name)
                for idx, name in enumerate(host_list)]
    prompt = '\n'.join(message.get('ask_host_for_monitor'))
    prompt = prompt.format(list='\n'.join(numbered))
    choice = int(askInt(prompt, default='1'))
    # Re-ask until the answer falls inside the numbered range.
    while not (0 < choice <= len(host_list)):
        msg = message.get('error_select_number').format(
            max_number=len(host_list))
        logger.error(msg)
        choice = int(askInt(''))
    return host_list[choice - 1]
def __init__(self, cluster_id, **kwargs):
    # Build the "cluster does not exist" message, optionally tagged with
    # the host it was detected on (kwargs['host']).
    message = m.get('error_cluster_not_exist')
    message = message.format(cluster_id=cluster_id)
    if 'host' in kwargs.keys():
        self.host = kwargs['host']
        message = "{} at '{}'".format(message, self.host)
    # NOTE(review): '*kwargs' unpacks only the dict KEYS as extra
    # positional args (the sibling SSH error class passes '*args' here).
    # Probably no extra args were intended — confirm before changing.
    LtcliBaseError.__init__(self, message, *kwargs)
def delete(self, cluster_id):
    """Delete cluster

    It is automatically backed up with timestamps as tags
    :param cluster_id: target cluster id
    """
    if not cluster_util.validate_id(cluster_id):
        raise ClusterIdError(cluster_id)
    center = Center()
    center.update_ip_port()
    if center.check_hosts_connection():
        # Stop running redis processes before removing their files.
        center.stop_redis(force=True)
    path_of_fb = config.get_path_of_fb(cluster_id)
    props_path = path_of_fb['redis_properties']
    hosts = config.get_props(props_path, 'sr2_redis_master_hosts', [])
    # The local machine must be backed up too, even if it is not listed.
    if not center.check_include_localhost(hosts):
        hosts += [config.get_local_ip()]
    timestamp = time.strftime("%Y%m%d%H%M%S", time.gmtime())
    backup_dir = 'cluster_{}_bak_{}'.format(cluster_id, timestamp)
    for host in hosts:
        center.cluster_backup(host, cluster_id, backup_dir)
    msg = message.get('cluster_delete_complete')
    logger.info(msg.format(cluster_id=cluster_id))
def run_sync(host=None):
    """Import clusters from the host
    """
    if host is None:
        logger.error('host information is not available')
        return None
    cluster_base = config.get_base_directory()
    if not os.path.exists(cluster_base):
        logger.error('cluster does not exist on the localhost.')
        # Create an empty base so the import below has a destination.
        os.mkdir(cluster_base)
    pattern = r'cluster_[\d]+'
    local_clusters = {
        name for name in os.listdir(cluster_base) if re.match(pattern, name)}
    client = net.get_ssh(host)
    if not net.is_dir(client, cluster_base):
        logger.error('cluster does not exist on the host({}).'.format(host))
        return None
    remote_listing = net.ssh_execute(
        client, 'ls {}'.format(cluster_base))[1].split()
    remote_clusters = {
        name for name in remote_listing if re.match(pattern, name)}
    # Remote-only clusters import silently; conflicting ones need a yes.
    import_target = remote_clusters - local_clusters
    for cluster in remote_clusters & local_clusters:
        msg = message.get('ask_cluster_overwrite').format(
            cluster=" ".join(cluster.split('_')))
        if ask_util.askBool(msg, default='n'):
            import_target.add(cluster)
            os.system("rm -rf {}".format(cluster_base + "/" + cluster))
    for target in import_target:
        os.system("rsync -a {} {}".format(
            host + ":" + cluster_base + "/" + target, cluster_base))
    logger.info("Importing cluster complete...")
def run_deploy(cluster_id=None, history_save=True, clean=False,
               strategy="none"):
    """Install LightningDB package.

    :param cluster_id: cluster id
    :param history_save: save input history and use as default
    :param clean: delete redis log, node configuration
    :param strategy: none(default): normal deploy, zero-downtime: re-deploy without stop
    """
    # validate cluster id
    if cluster_id is None:
        cluster_id = config.get_cur_cluster_id(allow_empty_id=True)
        if cluster_id < 0:
            logger.error(message.get('error_invalid_cluster_on_deploy'))
            return
    if not cluster_util.validate_id(cluster_id):
        raise ClusterIdError(cluster_id)
    # validate option
    if not isinstance(history_save, bool):
        msg = message.get('error_option_type_not_boolean')
        logger.error(msg.format(option='history-save'))
        return
    logger.debug("option '--history-save': {}".format(history_save))
    if not isinstance(clean, bool):
        msg = message.get('error_option_type_not_boolean')
        logger.error(msg.format(option='clean'))
        return
    logger.debug("option '--clean': {}".format(clean))
    strategy_list = ["none", "zero-downtime"]
    if strategy not in strategy_list:
        msg = message.get('error_deploy_strategy').format(
            value=strategy, list=strategy_list)
        logger.error(msg)
        return
    if strategy == "zero-downtime":
        # Re-deploy on top of a running cluster without stopping it.
        run_cluster_use(cluster_id)
        _deploy_zero_downtime(cluster_id)
        return
    _deploy(cluster_id, history_save, clean)
def failover_with_dir(self, server, dir):
    """Find masters that use the specified directory path and do failover with its slave

    :param server: IP or hostname
    :param dir: directory path
    """
    center = Center()
    center.update_ip_port()
    logger.debug('failover_with_dir')
    master_nodes = center.get_master_obj_list()
    cluster_id = config.get_cur_cluster_id()
    lib_path = config.get_ld_library_path(cluster_id)
    path_of_fb = config.get_path_of_fb(cluster_id)
    sr2_redis_bin = path_of_fb['sr2_redis_bin']
    # Environment prefix so redis-cli resolves the bundled libraries.
    env_cmd = [
        'GLOBIGNORE=*;',
        'export LD_LIBRARY_PATH={};'.format(lib_path['ld_library_path']),
        'export DYLD_LIBRARY_PATH={};'.format(
            lib_path['dyld_library_path']),
    ]
    redis_cli_cmd = os.path.join(sr2_redis_bin, 'redis-cli')
    # Find masters with dir
    ret = RedisCliUtil.command_all_async('config get dir', slave=True)
    outs = ''
    meta = []
    m_endpoint = []
    for node in master_nodes:
        m_endpoint.append(node['addr'])
    for _, host, port, res, stdout in ret:
        if res == 'OK':
            # 'config get dir' output: assumes the directory value sits on
            # line index 2 after the leading join with '' — TODO confirm
            # against RedisCliUtil.command_all_async's output format.
            flat_stdout = '\n'.join([outs, stdout])
            line = flat_stdout.splitlines()
            if self.compare_ip(host, server) and dir in line[2]:
                # Only collect endpoints that are currently masters.
                endpoint = '{}:{}'.format(socket.gethostbyname(host), port)
                if endpoint in m_endpoint:
                    meta.append(endpoint)
        else:
            logger.warning("FAIL {}:{} {}".format(host, port, stdout))
    # For every matched master, ask each of its slaves to take over.
    for endpoint in meta:
        for master_node in master_nodes:
            if endpoint == master_node['addr']:
                for slave_node in master_node['slaves']:
                    addr = slave_node['addr']
                    (s_host, s_port) = addr.split(':')
                    sub_cmd = 'cluster failover takeover'
                    command = '{} {} -h {} -p {} {}'.format(
                        ' '.join(env_cmd),
                        redis_cli_cmd,
                        s_host,
                        s_port,
                        sub_cmd,
                    )
                    self._print(
                        message.get('try_failover_takeover').format(
                            slave=addr))
                    stdout = subprocess.check_output(command, shell=True)
                    self._print(stdout)
def __init__(self, exit_status, host, stderr, *args):
    """SSH command failure: keeps exit status, host and stderr for callers."""
    self.exit_status = exit_status
    self.host = host
    self.stderr = stderr
    msg = m.get('error_ssh_command_execute')
    msg = msg.format(code=exit_status, host=host, stderr=stderr)
    LtcliBaseError.__init__(self, msg, *args)
def failback(self):
    """Restart disconnected redis
    """
    center = Center()
    center.update_ip_port()
    disconnected = []
    paused = []
    # Collect addresses of every dead or paused process, masters first.
    for master in center.get_master_obj_list():
        if master['status'] == 'disconnected':
            disconnected.append(master['addr'])
        if master['status'] == 'paused':
            paused.append(master['addr'])
        for slave in master['slaves']:
            if slave['status'] == 'disconnected':
                disconnected.append(slave['addr'])
            if slave['status'] == 'paused':
                paused.append(slave['addr'])

    def _group_by_host(addr_list):
        # host -> [port, ...], preserving encounter order
        grouped = {}
        for addr in addr_list:
            host, port = addr.split(':')
            grouped.setdefault(host, []).append(port)
        return grouped

    disconnected_by_host = _group_by_host(disconnected)
    paused_by_host = _group_by_host(paused)
    current_time = time.strftime("%Y%m%d-%H%M", time.gmtime())
    for host, ports in disconnected_by_host.items():
        msg = message.get('redis_run')
        logger.info(msg.format(host=host, ports='|'.join(ports)))
        center.run_redis_process(host, ports, False, current_time)
    for host, ports in paused_by_host.items():
        msg = message.get('redis_restart')
        logger.info(msg.format(host=host, ports='|'.join(ports)))
        # Paused processes must be stopped before they can be restarted.
        center.stop_redis_process(host, ports)
        center.run_redis_process(host, ports, False, current_time)
    if not disconnected_by_host and not paused_by_host:
        logger.info(message.get('already_all_redis_alive'))
def use(self, cluster_id):
    """Change selected cluster

    :param cluster_id: target cluster #
    """
    _change_cluster(cluster_id)
    # -1 is the "no cluster" sentinel; show it as '-'.
    shown_id = '-' if cluster_id == -1 else cluster_id
    logger.info(message.get('use_cluster').format(cluster_id=shown_id))
def get(self, key, all=False, host=None, port=None):
    """Command: redis-cli config get

    :param key: redis config keyword
    :param all: If true, send command to all redis
    :param host: host info for redis
    :param port: port info for redis
    """
    if not isinstance(all, bool):
        msg = m.get('error_option_type_not_boolean')
        # bugfix: the template key is 'option' (as used elsewhere);
        # formatting with 'options=' raised KeyError at runtime.
        msg = msg.format(option='all')
        logger.error(msg)
        return
    if (not host or not port) and not all:
        msg = m.get('use_host_port_or_option_all')
        logger.error(msg)
        return
    sub_cmd = 'config get "{key}" 2>&1'.format(key=key)
    if all:
        meta = []
        ret = RedisCliUtil.command_all_async(sub_cmd)
        # loop var renamed from 'message' to avoid shadowing the module
        for m_s, host, port, result, reply in ret:
            addr = '{}:{}'.format(host, port)
            if result == 'OK':
                if reply:
                    # redis-cli prints "<key>\n<value>"
                    _, value = reply.split('\n')
                    meta.append([m_s, addr, value])
                else:
                    meta.append([m_s, addr, color.red('Invalid Key')])
            else:
                meta.append([m_s, addr, color.red(result)])
        utils.print_table([['TYPE', 'ADDR', 'RESULT']] + meta)
    else:
        output = RedisCliUtil.command(sub_cmd=sub_cmd, host=host, port=port,
                                      formatter=self.no_print)
        output = output.strip()
        if output:
            key, value = output.split('\n')
            logger.info(value)
        else:
            msg = m.get('error_invalid_key').format(key=key)
            logger.error(msg)
def clean(self, logs=False):
    """Clean cluster

    Delete redis config, data, node configuration.
    :param logs: Delete log of redis
    """
    # docfix: parameter is 'logs', the docstring said ':param log:'
    if not isinstance(logs, bool):
        msg = message.get('error_option_type_not_boolean')
        msg = msg.format(option='logs')
        logger.error(msg)
        return
    center = Center()
    center.update_ip_port()
    if logs:
        # Logs-only mode: remove server logs and stop here.
        center.remove_all_of_redis_log_force()
        return
    center.cluster_clean()
    msg = message.get('apply_after_restart')
    logger.info(msg)
def prefix_of_db_path(save, default=None):
    """Ask the DB path prefix; optionally persist it to deploy history.

    :param save: persist the answer to deploy history when true
    :param default: default shown in the prompt (falls back to history)
    :return: the accepted, stripped path prefix
    """
    logger.debug('ask_prefix_of_db_path')
    deploy_history = config.get_deploy_history()
    default = default or deploy_history['prefix_of_db_path']
    answer = ask(message.get('ask_db_path'), default=default).strip()
    if save:
        deploy_history['prefix_of_db_path'] = answer
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(answer))
    return answer
def _edit_conf(self, target_path, syntax=None):
    """Edit target_path through a '.tmp' working copy.

    A leftover working copy from an earlier session can be resumed or
    discarded; the edited copy is written back and then removed.
    """
    tmp_path = target_path + '.tmp'
    if os.path.exists(tmp_path):
        resume = ask_util.askBool(
            message.get('ask_load_history_of_previous_modification'))
        if not resume:
            os.remove(tmp_path)
    if not os.path.exists(tmp_path):
        shutil.copy(target_path, tmp_path)
    editor.edit(tmp_path, syntax=syntax)
    shutil.copy(tmp_path, target_path)
    os.remove(tmp_path)
def create(host_port_list, max_slots=1024):
    """Create a redis cluster from the given (host, port) pairs.

    Meets every node to the first one, then distributes SLOT_COUNT slots
    evenly (the first nodes take one extra slot each until the residue is
    used up) and waits for the cluster state to settle.

    :param host_port_list: iterable of (host, port) tuples
    :param max_slots: slot-range chunk size passed to _add_slots_range
    """
    conns = []
    try:
        for host, port in set(host_port_list):
            t = Connection(host, port)
            conns.append(t)
            _ensure_cluster_status_unset(t)
            # consistency fix: use the module 'logger' — the bare
            # 'logging.info' call went to the root logger and bypassed
            # the CLI's log configuration
            logger.info('Instance at %s:%d checked', t.host, t.port)
        msg = message.get('cluster_meet')
        logger.info(msg)
        logger.info(' - {}:{}'.format(conns[0].host, conns[0].port))
        first_conn = conns[0]
        for i, t in enumerate(conns[1:]):
            logger.info(' - {}:{}'.format(t.host, t.port))
            _create(t, first_conn)
            sleep(0.02)
        slots_each = SLOT_COUNT // len(conns)
        slots_residue = SLOT_COUNT - slots_each * len(conns)
        slots_each += 1
        prev = 0
        for i, t in enumerate(conns[0:]):
            if i == slots_residue:
                # the first 'slots_residue' nodes took one extra slot
                slots_each -= 1
            msg = ' - {}:{}, {}'.format(t.host, t.port, slots_each)
            logger.info(msg)
            _add_slots_range(t, prev, prev + slots_each, max_slots)
            sleep(0.02)
            # consistency fix: was 'logging.info' (root logger)
            logger.info('Add %d slots to %s:%d', slots_each, t.host, t.port)
            prev = prev + slots_each
        msg = message.get('check_cluster_state_assign_slot')
        logger.info(msg)
        for t in conns:
            _poll_check_status(t)
        logger.info('Ok')
    finally:
        # Always release the connections, even on failure.
        for t in conns:
            t.close()
def reset_oom(all=False, host=None, port=0):
    """Send reset oom command

    :param all: If true, send command to all
    :param host: host info for redis
    :param port: port info for redis
    """
    if not isinstance(all, bool):
        msg = message.get('error_option_type_not_boolean').format(option='all')
        logger.error(msg)
        return
    _command('resetOom', all, host, port)
def save_redis_template_config(key, value):
    """Save redis template config to file

    :param key: key
    :param value: value
    """
    cluster_id = config.get_cur_cluster_id()
    path_of_fb = config.get_path_of_fb(cluster_id)
    master_template = path_of_fb['master_template']
    logger.info(message.get('save_config_to_template'))
    # Keys are stripped so surrounding whitespace never reaches the file.
    RedisCliUtil._save_config(master_template, key.strip(), value)
def hosts(save, default=None):
    """Ask for the deploy host list (comma separated).

    :param save: persist the answer to deploy history when true
    :param default: default host list (falls back to history)
    :return: list of stripped host names
    """
    logger.debug('ask host')
    deploy_history = config.get_deploy_history()
    default = default or deploy_history['hosts']
    answer = ask(message.get('ask_hosts'), default=', '.join(default))
    host_list = [piece.strip() for piece in answer.split(',')]
    if save:
        deploy_history['hosts'] = host_list
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(host_list))
    return host_list
def get_installers_from_fb_s3(maximum_number=5):
    '''bring up to maximum_number installers in the latest order from s3
    default value of maximum_value is 5
    if there is problem with url or network connection is fail,
    return empty list
    return [{
        name:string: file name
        url:string: download url
        type:string: url type
    }]
    '''
    ret = []
    url = 'https://flashbase.s3.ap-northeast-2.amazonaws.com/latest/latest.html'
    warning_msg = "Fail to load installer list from '{}'".format(url)
    try:
        res = requests.get(url)
        status_code = res.status_code
        if status_code >= 400:
            msg = message.get('error_http_request')
            msg = msg.format(code=status_code, msg=warning_msg)
            logger.warning(msg)
            # bugfix: honor the documented "return empty list on failure"
            # contract instead of parsing the error page body
            return ret
        res_text = str(res.text)
        res_text = list(map(lambda x: x.strip(), res_text.split('\n')))
        # Only anchor lines carry installer entries.
        filtered = list(filter(lambda x: x.startswith('<a href='), res_text))
        for text in filtered:
            if maximum_number <= 0:
                break
            link = parser.get_word_between(text, '<a href="', '">')
            name = parser.get_word_between(text, '<a href=".*">', '/*</a>')
            ret.append({'name': name, 'url': link, 'type': 'download'})
            maximum_number -= 1
        return ret
    except requests.exceptions.ConnectionError:
        msg = message.get('error_http_connection').format(msg=warning_msg)
        logger.warning(msg)
        return []
def run_monitor(n=10, t=2):
    """Monitoring logs of redis.

    :param n: number of lines to print log
    :param t: renewal cycle(sec)
    """
    if not isinstance(n, int):
        logger.error(
            message.get('error_option_type_not_number').format(option='n'))
        return
    if not isinstance(t, (int, float)):
        logger.error(
            message.get('error_option_type_not_float').format(option='t'))
        return
    # 'tail' is mandatory on the monitoring host.
    try:
        sp.check_output('which tail', shell=True)
    except Exception:
        logger.error(message.get('error_not_found_command_tail'))
        return
    cluster_id = config.get_cur_cluster_id()
    path_of_fb = config.get_path_of_fb(cluster_id)
    sr2_redis_log = path_of_fb['sr2_redis_log']
    log_files = '{}/servers*'.format(sr2_redis_log)
    target_host = ask_util.host_for_monitor(config.get_master_host_list())
    # Prefer 'watch' for a refreshing view; fall back to 'tail -F'.
    try:
        sp.check_output('which watch', shell=True)
        command = "ssh -t {} watch -n {} 'tail -n {} {}'".format(
            target_host, t, n, log_files)
        sp.call(command, shell=True)
    except Exception:
        logger.warning(message.get('error_not_found_command_watch'))
        logger.info(message.get('message_for_exit'))
        command = "tail -F -s {} {}".format(t, log_files)
        client = net.get_ssh(target_host)
        net.ssh_execute_async(client, command)
def set_level(level):
    """Change log level.

    :param level: debug / info / warning / error
    """
    accepted = ['debug', 'info', 'warning', 'error', 'warn']
    if level not in accepted:
        # 'warn' is accepted as an alias but hidden from the shown choices.
        accepted.remove('warn')
        logger.error("LogLevelError: '{}'. Select in {}".format(
            level, accepted))
        return
    stream_handler.level = get_log_code(level)
    print(color.white(message.get('change_log_level').format(level=level)))