def set(self, key, value, all=False, save=False, host=None, port=None):
    """Command: redis-cli config set

    :param key: target key
    :param value: value to set
    :param save: If true, save value to config file
    :param all: If true, send command to all redis
    :param host: host info for redis
    :param port: port info for redis
    """
    if not isinstance(all, bool):
        msg = m.get('error_option_type_not_boolean')
        msg = msg.format(options='all')
        logger.error(msg)
        return
    if not isinstance(save, bool):
        msg = m.get('error_option_type_not_boolean')
        msg = msg.format(options='save')
        logger.error(msg)
        return
    if (not host or not port) and not all:
        msg = m.get('use_host_port_or_option_all')
        logger.error(msg)
        return
    sub_cmd = 'config set {key} {value} 2>&1'.format(key=key, value=value)
    if all:
        meta = []
        ret = RedisCliUtil.command_all_async(sub_cmd)
        ok_cnt = 0
        for m_s, host, port, result, message in ret:
            addr = '{}:{}'.format(host, port)
            if result == 'OK':
                if utils.to_str(message) == 'OK':
                    ok_cnt += 1
                else:
                    meta.append([m_s, addr, color.red(message)])
            else:
                meta.append([m_s, addr, color.red('FAIL')])
        if meta:
            utils.print_table([['TYPE', 'ADDR', 'RESULT']] + meta)
        logger.info('success {}/{}'.format(ok_cnt, len(ret)))
    else:
        output = RedisCliUtil.command(
            sub_cmd=sub_cmd,
            host=host,
            port=port,
            formatter=self.no_print)
        output = output.strip()
        if output == "OK":
            logger.info(output)
        else:
            logger.error(output)
    if save:
        RedisCliUtil.save_redis_template_config(key, value)
        center = Center()
        center.update_ip_port()
        success = center.check_hosts_connection()
        if not success:
            return
        center.configure_redis()
        center.sync_conf()
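# Illustrative usage (a sketch, not part of the module): the owning command
# class is not shown here, so the name `Config` below is assumed, and the
# keys, values, host, and port are arbitrary examples.
#
#     Config().set('maxmemory-policy', 'allkeys-lru', all=True)
#     Config().set('maxmemory', '1gb', host='127.0.0.1', port=18000, save=True)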
def tree(self):
    """The results of 'cli cluster nodes' are displayed in tree format
    """
    center = Center()
    center.update_ip_port()
    master_node_list = center.get_master_obj_list()
    output_msg = []
    for master_node in master_node_list:
        addr = master_node['addr']
        status = master_node['status']
        msg = '{}({})'.format(addr, status)
        if status == 'disconnected':
            msg = color.red(msg)
        if status == 'paused':
            msg = color.yellow(msg)
        output_msg.append(msg)
        for slave_node in master_node['slaves']:
            addr = slave_node['addr']
            status = slave_node['status']
            msg = '{}({})'.format(addr, status)
            if status == 'disconnected':
                msg = color.red(msg)
            if status == 'paused':
                msg = color.yellow(msg)
            output_msg.append('|__ ' + msg)
        output_msg.append('')
    logger.info(color.ENDC + '\n'.join(output_msg))
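# Illustrative output (a sketch; the addresses and statuses are made up, but
# the '(status)' suffix and the '|__ ' slave prefix come from the format
# strings above):
#
#     192.168.0.1:18000(ok)
#     |__ 192.168.0.2:18050(ok)
#     |__ 192.168.0.3:18050(disconnected)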
def ping(host=None, port=None, all=False):
    """Send ping command

    :param all: If true, send command to all
    :param host: host info for redis
    :param port: port info for redis
    """
    if not isinstance(all, bool):
        msg = message.get('error_option_type_not_boolean').format(option='all')
        logger.error(msg)
        return
    if (not host or not port) and not all:
        msg = message.get('use_host_port_or_option_all')
        logger.error(msg)
        return
    if all:
        meta = []
        ret = RedisCliUtil.command_all_async('ping 2>&1')
        pong_cnt = 0
        for m_s, host, port, result, _ in ret:
            addr = '{}:{}'.format(host, port)
            if result == 'OK':
                pong_cnt += 1
            else:
                meta.append([m_s, addr, color.red('FAIL')])
        if meta:
            utils.print_table([['TYPE', 'ADDR', 'RESULT']] + meta)
        msg = message.get('counting_alive_redis')
        msg = msg.format(alive=pong_cnt, total=len(ret))
        logger.info(msg)
        return
    if host and port:
        _command('ping', False, host, port)
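# Illustrative usage (a sketch; the host and port below are assumptions, not
# taken from any configuration):
#
#     ping(all=True)                      # ping every redis, report alive count
#     ping(host='127.0.0.1', port=18000)  # ping a single redis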
def get(self, key, all=False, host=None, port=None):
    """Command: redis-cli config get

    :param key: redis config keyword
    :param all: If true, send command to all redis
    :param host: host info for redis
    :param port: port info for redis
    """
    if not isinstance(all, bool):
        msg = m.get('error_option_type_not_boolean')
        msg = msg.format(options='all')
        logger.error(msg)
        return
    if (not host or not port) and not all:
        msg = m.get('use_host_port_or_option_all')
        logger.error(msg)
        return
    sub_cmd = 'config get "{key}" 2>&1'.format(key=key)
    if all:
        meta = []
        ret = RedisCliUtil.command_all_async(sub_cmd)
        for m_s, host, port, result, message in ret:
            addr = '{}:{}'.format(host, port)
            if result == 'OK':
                if message:
                    _, value = message.split('\n')
                    meta.append([m_s, addr, value])
                else:
                    meta.append([m_s, addr, color.red('Invalid Key')])
            else:
                meta.append([m_s, addr, color.red(result)])
        utils.print_table([['TYPE', 'ADDR', 'RESULT']] + meta)
    else:
        output = RedisCliUtil.command(
            sub_cmd=sub_cmd,
            host=host,
            port=port,
            formatter=self.no_print)
        output = output.strip()
        if output:
            key, value = output.split('\n')
            logger.info(value)
        else:
            msg = m.get('error_invalid_key').format(key=key)
            logger.error(msg)
def _deploy(cluster_id, history_save, clean):
    deploy_state = DeployUtil().get_state(cluster_id)
    if deploy_state == DEPLOYED:
        msg = message.get('ask_deploy_again')
        msg = msg.format(cluster_id=cluster_id)
        msg = color.yellow(msg)
        yes = ask_util.askBool(msg, default='n')
        if not yes:
            logger.info(message.get('cancel'))
            return
    restore_yes = None
    no_localhost = False
    current_time = time.strftime("%Y%m%d%H%M%S", time.gmtime())
    cluster_backup_dir = 'cluster_{}_bak_{}'.format(cluster_id, current_time)
    conf_backup_dir = 'cluster_{}_conf_bak_{}'.format(cluster_id, current_time)
    tmp_backup_dir = 'cluster_{}_conf_bak_{}'.format(cluster_id, 'tmp')
    meta = [['NAME', 'VALUE']]
    path_of_fb = config.get_path_of_fb(cluster_id)
    conf_path = path_of_fb['conf_path']
    props_path = path_of_fb['redis_properties']
    cluster_path = path_of_fb['cluster_path']
    path_of_cli = config.get_path_of_cli(cluster_id)
    conf_backup_path = path_of_cli['conf_backup_path']
    tmp_backup_path = os.path.join(conf_backup_path, tmp_backup_dir)
    local_ip = config.get_local_ip()

    # ask installer
    installer_path = ask_util.installer()
    installer_name = os.path.basename(installer_path)
    meta.append(['installer', installer_name])

    # ask restore conf
    if deploy_state == DEPLOYED:
        restore_yes = ask_util.askBool(message.get('ask_restore_conf'))
        meta.append(['restore', restore_yes])

    # input props
    hosts = []
    if deploy_state == DEPLOYED:
        if restore_yes:
            meta += DeployUtil().get_meta_from_props(props_path)
            hosts = config.get_props(props_path, 'sr2_redis_master_hosts')
        else:
            if not os.path.isdir(conf_backup_path):
                os.mkdir(conf_backup_path)
            if os.path.exists(tmp_backup_path):
                msg = message.get('ask_load_history_of_previous_modification')
                yes = ask_util.askBool(msg)
                if not yes:
                    shutil.rmtree(tmp_backup_path)
            if not os.path.exists(tmp_backup_path):
                os.mkdir(tmp_backup_path)
                shutil.copy(
                    os.path.join(conf_path, 'redis.properties'),
                    os.path.join(tmp_backup_path, 'redis.properties'))
            tmp_props_path = os.path.join(tmp_backup_path, 'redis.properties')
            editor.edit(tmp_props_path, syntax='sh')
            meta += DeployUtil().get_meta_from_props(tmp_props_path)
            hosts = config.get_props(tmp_props_path, 'sr2_redis_master_hosts')
    else:
        # new deploy
        props_dict = ask_util.props(cluster_id, save=history_save)
        hosts = props_dict['hosts']
        meta += DeployUtil().get_meta_from_dict(props_dict)
    utils.print_table(meta)
    msg = message.get('confirm_deploy_information')
    yes = ask_util.askBool(msg)
    if not yes:
        logger.info(message.get('cancel'))
        return

    # check node status
    success = Center().check_hosts_connection(hosts, True)
    if not success:
        msg = message.get('error_exist_unavailable_host')
        logger.error(msg)
        return
    logger.debug('Connection of all hosts ok.')
    success = Center().check_include_localhost(hosts)
    if not success:
        no_localhost = True

    # get port info
    if deploy_state == DEPLOYED:
        if restore_yes:
            key = 'sr2_redis_master_ports'
            m_ports = config.get_props(props_path, key, [])
            key = 'sr2_redis_slave_ports'
            s_ports = config.get_props(props_path, key, [])
            replicas = len(s_ports) // len(m_ports)
        else:
            key = 'sr2_redis_master_ports'
            m_ports = config.get_props(tmp_props_path, key, [])
            key = 'sr2_redis_slave_ports'
            s_ports = config.get_props(tmp_props_path, key, [])
            replicas = len(s_ports) // len(m_ports)
    else:
        m_ports = props_dict['master_ports']
        s_ports = props_dict['slave_ports']
        replicas = props_dict['replicas']
    while True:
        msg = message.get('check_port')
        logger.info(msg)
        host_ports_list = []
        for host in hosts:
            host_ports_list.append((host, m_ports + s_ports))
        conflict = Center().check_port_is_enable(host_ports_list)
        if not conflict:
            logger.info("OK")
            break
        utils.print_table([["HOST", "PORT"]] + conflict)
        msg = message.get('ask_port_collision')
        msg = color.yellow(msg)
        yes = ask_util.askBool(msg)
        if yes:
            logger.info("OK")
            break
        m_ports = ask_util.master_ports(False, cluster_id)
        replicas = ask_util.replicas(False)
        s_ports = ask_util.slave_ports(cluster_id, len(m_ports), replicas)
        if deploy_state == DEPLOYED:
            if restore_yes:
                key = 'sr2_redis_master_ports'
                value = cluster_util.convert_list_2_seq(m_ports)
                config.set_props(props_path, key, value)
                key = 'sr2_redis_slave_ports'
                value = cluster_util.convert_list_2_seq(s_ports)
                config.set_props(props_path, key, value)
            else:
                key = 'sr2_redis_master_ports'
                value = cluster_util.convert_list_2_seq(m_ports)
                config.set_props(tmp_props_path, key, value)
                key = 'sr2_redis_slave_ports'
                value = cluster_util.convert_list_2_seq(s_ports)
                config.set_props(tmp_props_path, key, value)
        else:
            props_dict['master_ports'] = m_ports
            props_dict['slave_ports'] = s_ports
            props_dict['replicas'] = replicas

    # if pending, delete legacy on each host
    if no_localhost:
        if DeployUtil().get_state(cluster_id, local_ip) == PENDING:
            client = net.get_ssh(local_ip)
            command = 'rm -rf {}'.format(cluster_path)
            net.ssh_execute(client=client, command=command)
            client.close()
    for host in hosts:
        if DeployUtil().get_state(cluster_id, host) == PENDING:
            client = net.get_ssh(host)
            command = 'rm -rf {}'.format(cluster_path)
            net.ssh_execute(client=client, command=command)
            client.close()

    # added_hosts = post_hosts - pre_hosts
    msg = message.get('check_cluster_exist')
    logger.info(msg)
    added_hosts = set(hosts)
    meta = []
    if deploy_state == DEPLOYED:
        pre_hosts = config.get_props(props_path, 'sr2_redis_master_hosts')
        added_hosts -= set(pre_hosts)
    can_deploy = True
    if no_localhost:
        added_hosts |= set([local_ip])
    for host in added_hosts:
        client = net.get_ssh(host)
        is_localhost = Center().is_localhost(host)
        if is_localhost:
            if no_localhost:
                continue
            if os.path.exists(cluster_path + '/remote'):
                meta.append([host, color.green('CLEAN')])
                continue
        if net.is_exist(client, cluster_path):
            meta.append([host, color.red('CLUSTER EXIST')])
            can_deploy = False
            continue
        meta.append([host, color.green('CLEAN')])
    if meta:
        utils.print_table([['HOST', 'STATUS']] + meta)
    if not can_deploy:
        msg = message.get('error_cluster_collision')
        logger.error(msg)
        return
        # if not force:
        #     logger.error("If you trying to force, use option '--force'")
        #     return
    logger.info('OK')

    # cluster stop and clean
    if deploy_state == DEPLOYED and clean:
        center = Center()
        cur_cluster_id = config.get_cur_cluster_id(allow_empty_id=True)
        run_cluster_use(cluster_id)
        center.update_ip_port()
        center.stop_redis()
        center.remove_all_of_redis_log_force()
        center.cluster_clean()
        run_cluster_use(cur_cluster_id)

    # backup conf
    if deploy_state == DEPLOYED:
        Center().conf_backup(local_ip, cluster_id, conf_backup_dir)

    # backup cluster
    backup_hosts = []
    if deploy_state == DEPLOYED:
        backup_hosts += set(pre_hosts)
    # if force:
    #     backup_hosts += added_hosts
    for host in backup_hosts:
        cluster_path = path_of_fb['cluster_path']
        client = net.get_ssh(host)
        Center().cluster_backup(host, cluster_id, cluster_backup_dir)
        client.close()

    # transfer & install
    msg = message.get('transfer_and_execute_installer')
    logger.info(msg)
    target_hosts = hosts + [local_ip] if no_localhost else hosts
    for host in target_hosts:
        if not (no_localhost and Center().is_localhost(host)):
            logger.info(' - {}'.format(host))
        client = net.get_ssh(host)
        cmd = 'mkdir -p {0} && touch {0}/.deploy.state'.format(cluster_path)
        net.ssh_execute(client=client, command=cmd)
        client.close()
        DeployUtil().transfer_installer(host, cluster_id, installer_path)
        try:
            DeployUtil().install(host, cluster_id, installer_name)
        except SSHCommandError as ex:
            msg = message.get('error_execute_installer')
            msg = msg.format(installer=installer_path)
            logger.error(msg)
            logger.exception(ex)
            return

    # setup props
    if deploy_state == DEPLOYED:
        if restore_yes:
            tag = conf_backup_dir
        else:
            tag = tmp_backup_dir
        Center().conf_restore(local_ip, cluster_id, tag)
    else:
        key = 'sr2_redis_master_hosts'
        config.make_key_enable(props_path, key)
        config.set_props(props_path, key, props_dict['hosts'])

        key = 'sr2_redis_master_ports'
        config.make_key_enable(props_path, key)
        value = cluster_util.convert_list_2_seq(props_dict['master_ports'])
        config.set_props(props_path, key, value)

        key = 'sr2_redis_slave_hosts'
        config.make_key_enable(props_path, key)
        config.set_props(props_path, key, props_dict['hosts'])
        config.make_key_disable(props_path, key)

        if props_dict['replicas'] > 0:
            key = 'sr2_redis_slave_hosts'
            config.make_key_enable(props_path, key)

            key = 'sr2_redis_slave_ports'
            config.make_key_enable(props_path, key)
            value = cluster_util.convert_list_2_seq(props_dict['slave_ports'])
            config.set_props(props_path, key, value)

        key = 'ssd_count'
        config.make_key_enable(props_path, key)
        config.set_props(props_path, key, props_dict['ssd_count'])

        key = 'sr2_redis_data'
        config.make_key_enable(props_path, key, v1_flg=True)
        config.make_key_enable(props_path, key, v1_flg=True)
        config.make_key_disable(props_path, key)
        config.set_props(props_path, key, props_dict['prefix_of_db_path'])

        key = 'sr2_redis_db_path'
        config.make_key_enable(props_path, key, v1_flg=True)
        config.make_key_enable(props_path, key, v1_flg=True)
        config.make_key_disable(props_path, key)
        config.set_props(props_path, key, props_dict['prefix_of_db_path'])

        key = 'sr2_flash_db_path'
        config.make_key_enable(props_path, key, v1_flg=True)
        config.make_key_enable(props_path, key, v1_flg=True)
        config.make_key_disable(props_path, key)
        config.set_props(props_path, key, props_dict['prefix_of_db_path'])

    # sync props
    msg = message.get('sync_conf')
    logger.info(msg)
    for node in hosts:
        if socket.gethostbyname(node) in config.get_local_ip_list():
            continue
        client = net.get_ssh(node)
        if not client:
            msg = message.get('error_ssh_connection').format(host=node)
            logger.error(msg)
            return
        net.copy_dir_to_remote(client, conf_path, conf_path)
        client.close()

    # set deploy state complete
    if os.path.exists(tmp_backup_path):
        shutil.rmtree(tmp_backup_path)
    for node in target_hosts:
        path_of_fb = config.get_path_of_fb(cluster_id)
        cluster_path = path_of_fb['cluster_path']
        client = net.get_ssh(node)
        cmd = 'rm -rf {}'.format(os.path.join(cluster_path, '.deploy.state'))
        net.ssh_execute(client=client, command=cmd)
        client.close()
    if no_localhost:
        os.system('touch {}/remote'.format(cluster_path))

    msg = message.get('complete_deploy').format(cluster_id=cluster_id)
    logger.info(msg)
    Cluster().use(cluster_id)
    msg = message.get('suggest_after_deploy')
    logger.info(msg)
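# Illustrative usage (a sketch; the cluster id and flag values are arbitrary):
#
#     _deploy(cluster_id=1, history_save=True, clean=False)
#
# On an already-deployed cluster this asks whether to deploy again, optionally
# restores the previous redis.properties, and, when `clean` is True, stops and
# cleans the running cluster before installing.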
def restore(self, cluster_id, tag=None):
    """Restore cluster

    :param cluster_id: target cluster id
    :param tag: tag of the backup; if omitted, the most recent backup is restored
    """
    logger.debug('cluster restore: cluster_id={}, tag={}'.format(
        cluster_id, tag
    ))
    if not cluster_util.validate_id(cluster_id):
        raise ClusterIdError(cluster_id)
    # find restore folder with tag (local)
    path_of_fb = config.get_path_of_fb(cluster_id)
    cluster_backup_path = path_of_fb['cluster_backup_path']
    if tag is None:
        backup_list = os.listdir(cluster_backup_path)
        pattern = 'cluster_{}_bak_'.format(cluster_id)
        filtered = filter(lambda x: x.startswith(pattern), backup_list)
        sorted_list = sorted(list(filtered))
        if not sorted_list:
            msg = message.get('error_not_found_any_backup')
            logger.error('BackupNotExistError: ' + msg)
            return
        tag = sorted_list[-1]
        logger.debug("tag option is empty, auto select: {}".format(tag))
    cluster_restore_dir = tag
    backup_path = os.path.join(cluster_backup_path, cluster_restore_dir)
    if not os.path.isdir(backup_path):
        msg = message.get('error_not_found_backup').format(tag=tag)
        logger.error('BackupNotExistError: ' + msg)
        return
    # get hosts from cluster props
    props_path = os.path.join(
        backup_path,
        'tsr2-assembly-1.0.0-SNAPSHOT',
        'conf',
        'redis.properties'
    )
    hosts = config.get_props(props_path, 'sr2_redis_master_hosts', [])
    # check status of hosts
    success = Center().check_hosts_connection(hosts, True)
    if not success:
        msg = message.get('error_exist_unavailable_host')
        logger.error(msg)
        return
    logger.debug('Connection of all hosts ok.')
    success = Center().check_include_localhost(hosts)
    if not success:
        msg = message.get('error_not_include_localhost')
        logger.error(msg)
        return
    # check all host tag folder: OK / NOT FOUND
    msg = message.get('check_backup_info')
    logger.info(msg)
    buf = []
    for host in hosts:
        client = net.get_ssh(host)
        if not net.is_dir(client, backup_path):
            logger.debug('cannot find backup dir: {}-{}'.format(
                host, cluster_restore_dir
            ))
            buf.append([host, color.red('NOT FOUND')])
        client.close()
    if buf:
        utils.print_table([['HOST', 'RESULT']] + buf)
        return
    logger.info('OK')
    # backup cluster
    new_tag = time.strftime("%Y%m%d%H%M%S", time.gmtime())
    cluster_backup_dir = 'cluster_{}_bak_{}'.format(cluster_id, new_tag)
    for host in hosts:
        Center().cluster_backup(host, cluster_id, cluster_backup_dir)
    # restore cluster
    command = "cp -r {} {}/cluster_{}".format(
        backup_path,
        path_of_fb['base_directory'],
        cluster_id
    )
    for host in hosts:
        msg = message.get('restore_cluster')
        msg = msg.format(tag=cluster_backup_dir, host=host)
        logger.info(msg)
        client = net.get_ssh(host)
        net.ssh_execute(client, command)
        client.close()
    logger.info("OK")
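# Illustrative usage (a sketch; assumes this method belongs to the Cluster
# command class used in _deploy above, and the tag value is only an example of
# the 'cluster_<id>_bak_<timestamp>' naming used by backup):
#
#     Cluster().restore(1)                                   # most recent backup
#     Cluster().restore(1, tag='cluster_1_bak_20200101000000')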
import os

from ltcli import color

necessary_env = []
export_example = {
    'FBPATH': 'export FBPATH=$HOME/.flashbase',
    'LANG': 'export LANG=en_US.utf-8',
}

if 'FBPATH' not in os.environ:
    necessary_env.append('FBPATH')
if 'LANG' not in os.environ:
    necessary_env.append('LANG')
if necessary_env:
    msg = [
        'To start using ltcli, you should set env {}'.format(
            ', '.join(necessary_env)),
        'ex)',
    ]
    for env in necessary_env:
        msg.append(export_example[env])
    print(color.red('\n'.join(msg)))
    exit(1)

os.environ['LC_ALL'] = os.environ['LANG']
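# Illustrative output when FBPATH is not set (derived from the strings above;
# printed in red before exiting):
#
#     To start using ltcli, you should set env FBPATH
#     ex)
#     export FBPATH=$HOME/.flashbase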
def get(key):
    try:
        return json_data[key]
    except KeyError:
        print(color.red("Message not found: '{}'".format(key)))
        return ""
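# Illustrative usage (a sketch; 'cancel' is one of the message keys referenced
# by the commands above, and json_data is assumed to be the message catalog
# loaded elsewhere in this module):
#
#     print(get('cancel'))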