def _to_config_yaml(cluster_id, release, nodes, master_start_port,
                    master_end_port, master_enabled, slave_start_port,
                    slave_end_port, slave_enabled, ssd_count):
    conf = {}
    conf['release'] = release
    conf['nodes'] = nodes
    conf['ssd'] = {}
    conf['master_ports'] = {}
    conf['slave_ports'] = {}
    conf['master_ports']['from'] = int(master_start_port)
    conf['master_ports']['to'] = int(master_end_port)
    conf['master_ports']['enabled'] = bool(master_enabled)
    conf['slave_ports']['from'] = int(slave_start_port)
    conf['slave_ports']['to'] = int(slave_end_port)
    conf['slave_ports']['enabled'] = bool(slave_enabled)
    conf['ssd']['count'] = int(ssd_count)
    root_of_cli_config = get_root_of_cli_config()
    cluster_base_path = path_join(root_of_cli_config, 'clusters')
    if not os.path.isdir(cluster_base_path):
        os.mkdir(cluster_base_path)
    cluster_path = path_join(root_of_cli_config, 'clusters', cluster_id)
    if not os.path.isdir(cluster_path):
        os.mkdir(cluster_path)
    yaml_path = path_join(cluster_path, 'config.yaml')
    with open(yaml_path, 'w') as fd:
        yaml.dump(conf, fd, default_flow_style=False)

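# A minimal sketch of what _to_config_yaml() writes, assuming illustrative
# arguments (two nodes, master ports 18000-18007, slaves disabled, 3 SSDs);
# yaml.dump with default_flow_style=False emits keys in sorted order:
#
#   _to_config_yaml(cluster_id='1', release='',
#                   nodes=['192.168.0.1', '192.168.0.2'],
#                   master_start_port=18000, master_end_port=18007,
#                   master_enabled=True,
#                   slave_start_port=18050, slave_end_port=18057,
#                   slave_enabled=False, ssd_count=3)
#
#   # clusters/1/config.yaml
#   master_ports:
#     enabled: true
#     from: 18000
#     to: 18007
#   nodes:
#   - 192.168.0.1
#   - 192.168.0.2
#   release: ''
#   slave_ports:
#     enabled: false
#     from: 18050
#     to: 18057
#   ssd:
#     count: 3
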
def main(user, password, cluster_id, file, debug):
    _initial_check()
    if debug:
        log.set_mode('debug')
    if file:
        user_info['print_mode'] = 'file'
    logger.debug('Start fbcli')
    if not cluster_id:
        cluster_id = get_cur_cluster_id()
    run_cluster_use(cluster_id)
    if cluster_id not in cluster_util.get_cluster_list() + [-1]:
        root_of_cli_config = get_root_of_cli_config()
        head_path = path_join(root_of_cli_config, 'HEAD')
        with open(head_path, 'w') as fd:
            fd.write('%s' % '-1')
    if 'test' in sys.argv:
        run_test()
        exit(0)
    if file:
        _handle_file(user, file, cluster_id)
        exit(0)
    history = path_join(get_root_of_cli_config(), 'cli_history')
    session = PromptSession(
        lexer=PygmentsLexer(SqlLexer),
        completer=fb_completer,
        history=FileHistory(history),
        auto_suggest=AutoSuggestFromHistory(),
        style=style)
    while True:
        try:
            p = get_cli_prompt(user)
            logger.info(p)
            text = session.prompt()
            if text == "exit":
                break
            if 'fbcli' in text:
                old = text
                text = text.replace('fbcli', '')
                logger.info('> You can use "%s" instead of "%s"' % (text, old))
            _handle(text)
        except KeyboardInterrupt:
            continue
        except EOFError:
            break

def rsync_and_update_conf():
    fb_config = config.get_config()
    cluster_id = config.get_cur_cluster_id()
    nodes = fb_config['nodes']
    repo_path = config.get_root_of_cli_config()
    DeployUtil.yaml_to_redis_props(repo_path, cluster_id, None)
    DeployUtil.rsync(repo_path, nodes, None)
    DeployUtil.overwrite_conf(None)

def _change_cluster(cluster_id):
    root_of_cli_config = get_root_of_cli_config()
    head_path = path_join(root_of_cli_config, 'HEAD')
    cluster_list = cluster_util.get_cluster_list()
    if cluster_id not in cluster_list + [-1]:
        return False, 'Cluster does not exist: {}'.format(cluster_id)
    with open(head_path, 'w') as fd:
        fd.write('%s' % cluster_id)
    return True, ''

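# A minimal usage sketch (hypothetical wrapper body, not taken from the
# source): a command handler such as run_cluster_use, referenced in main()
# above, could switch the current cluster and surface the error on failure.
#
#   def run_cluster_use(cluster_id):
#       success, message = _change_cluster(cluster_id)
#       if not success:
#           logger.error(message)
#           return
#       logger.info('Cluster %s selected.' % cluster_id)
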
def clone(self, src, dest):
    """Clone cluster config from an existing cluster config.

    This command does not include deployment.

    :param src: src cluster #
    :param dest: dest cluster #
    """
    # NOTE: clone is currently disabled; the early return below skips the
    # copy logic that follows.
    logger.warning('clone disable')
    return self._print('Cluster clone from %s to %s' % (src, dest))
    cluster_list = _ls()
    if str(dest) in cluster_list:
        raise DuplicatedError('cluster')
    if src < 0:
        config = get_config(template=True)
        src = 'template'
    else:
        config = get_config(src)
    release = self._installer()
    nodes = self._nodes(','.join(config['nodes']))
    start_port, end_port = self._ports(dest)
    ssd_count = str(self._ssd_count(config['ssd']['count']))
    src = str(src)
    dest = str(dest)
    root_of_cli_config = get_root_of_cli_config()
    cluster_path = path_join(root_of_cli_config, 'clusters')
    src_path = path_join(cluster_path, src)
    dest_path = path_join(cluster_path, dest)
    shutil.copytree(src_path, dest_path)
    offset = 50
    Cluster.save_yaml(
        cluster_id=dest,
        release=release,
        nodes=nodes,
        master_start_port=start_port,
        master_end_port=end_port,
        master_enabled=True,
        slave_start_port=start_port + offset,
        slave_end_port=end_port + offset,
        slave_enabled=False,
        ssd_count=ssd_count)
    self.use(dest)

def _initial_check():
    client = get_ssh('localhost')
    if not client:
        logger.error('Need to ssh-keygen for localhost')
        exit(1)
    cli_config = config.get_cli_config()
    try:
        base_directory = cli_config['base_directory']
    except KeyError:
        # no base_directory entry yet; ask for it below
        base_directory = None
    except TypeError:
        # cli_config is not subscriptable (e.g. broken or empty config file):
        # remove it and ask for the base directory again
        root_of_cli_config = config.get_root_of_cli_config()
        conf_path = path_join(root_of_cli_config, 'config')
        os.system('rm {}'.format(conf_path))
        base_directory = None
    if not base_directory or not base_directory.startswith(('~', '/')):
        base_directory = ask_util.base_directory()
    base_directory = os.path.expanduser(base_directory)
    if not os.path.isdir(base_directory):
        os.system('mkdir -p {}'.format(base_directory))

def save_yaml(cluster_id, release, nodes, master_start_port, master_end_port,
              master_enabled, slave_start_port, slave_end_port, slave_enabled,
              ssd_count):
    root_of_cli_config = get_root_of_cli_config()
    cluster_path = path_join(root_of_cli_config, 'clusters')
    yaml_path = path_join(cluster_path, cluster_id, 'config.yaml')
    with open(yaml_path, 'r') as fd:
        config = yaml.load(fd)
    config['release'] = release
    config['nodes'] = nodes
    config['master_ports']['from'] = int(master_start_port)
    config['master_ports']['to'] = int(master_end_port)
    config['master_ports']['enabled'] = bool(master_enabled)
    config['slave_ports']['from'] = int(slave_start_port)
    config['slave_ports']['to'] = int(slave_end_port)
    config['slave_ports']['enabled'] = bool(slave_enabled)
    config['ssd']['count'] = int(ssd_count)
    with open(yaml_path, 'w') as fd:
        yaml.dump(config, fd, default_flow_style=False)

def run(self):
    """Enter SQL CLI mode.

    You start in fbcli mode; typing 'sql' brings you here.
    Type 'exit' to return.
    """
    history = path_join(config.get_root_of_cli_config(), 'sql_history')
    session = PromptSession(
        lexer=PygmentsLexer(SqlLexer),
        completer=fb_completer,
        history=FileHistory(history),
        auto_suggest=AutoSuggestFromHistory(),
        style=style)
    bindings = KeyBindings()

    @bindings.add('enter')
    def _(event):
        t = event.app.current_buffer.text.strip()
        if t.endswith(';') or len(t) == 0 or \
                t.startswith('exit') or t.startswith('help'):
            get_by_name('accept-line')(event)
        else:
            event.current_buffer.newline()

    while True:
        try:
            p = utils.TermColor.green(get_sql_prompt(self.user))
            print(p)
            text = session.prompt(multiline=True, key_bindings=bindings)
            text = text.split(';')[0].strip()
            if text == 'exit':
                break
            self.handle(text)
        except KeyboardInterrupt:
            continue
        except EOFError:
            break

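# Illustrative only: with the 'enter' binding above, pressing Enter inside an
# unfinished statement inserts a newline, and the buffer is only submitted once
# the text ends with ';' (or is empty, or starts with 'exit'/'help'). A session
# might look like this (the prompt text depends on get_sql_prompt):
#
#   fbsql> select *
#          from my_table
#          where id > 10;
#   fbsql> exit
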
def run_edit():
    p = path_join(config.get_root_of_cli_config(), 'config')
    editor.edit(p, syntax='yaml')

def run_import_conf():
    def _to_config_yaml(cluster_id, release, nodes, master_start_port,
                        master_end_port, master_enabled, slave_start_port,
                        slave_end_port, slave_enabled, ssd_count):
        conf = {}
        conf['release'] = release
        conf['nodes'] = nodes
        conf['ssd'] = {}
        conf['master_ports'] = {}
        conf['slave_ports'] = {}
        conf['master_ports']['from'] = int(master_start_port)
        conf['master_ports']['to'] = int(master_end_port)
        conf['master_ports']['enabled'] = bool(master_enabled)
        conf['slave_ports']['from'] = int(slave_start_port)
        conf['slave_ports']['to'] = int(slave_end_port)
        conf['slave_ports']['enabled'] = bool(slave_enabled)
        conf['ssd']['count'] = int(ssd_count)
        root_of_cli_config = get_root_of_cli_config()
        cluster_base_path = path_join(root_of_cli_config, 'clusters')
        if not os.path.isdir(cluster_base_path):
            os.mkdir(cluster_base_path)
        cluster_path = path_join(root_of_cli_config, 'clusters', cluster_id)
        if not os.path.isdir(cluster_path):
            os.mkdir(cluster_path)
        yaml_path = path_join(cluster_path, 'config.yaml')
        with open(yaml_path, 'w') as fd:
            yaml.dump(conf, fd, default_flow_style=False)

    def _import_from_fb_to_cli_conf(rp_exists):
        for cluster_id in rp_exists:
            path_of_fb = config.get_path_of_fb(cluster_id)
            rp = path_of_fb['redis_properties']
            d = config.get_props_as_dict(rp)
            nodes = d['sr2_redis_master_hosts']
            master_start_port = 0
            master_end_port = 0
            slave_start_port = 0
            slave_end_port = 0
            master_enabled = 'sr2_redis_master_ports' in d
            slave_enabled = 'sr2_redis_slave_ports' in d
            if master_enabled:
                master_start_port = min(d['sr2_redis_master_ports'])
                master_end_port = max(d['sr2_redis_master_ports'])
            if slave_enabled:
                slave_start_port = min(d['sr2_redis_slave_ports'])
                slave_end_port = max(d['sr2_redis_slave_ports'])
            ssd_count = d['ssd_count']
            _to_config_yaml(
                cluster_id=cluster_id,
                release='',
                nodes=nodes,
                master_start_port=master_start_port,
                master_end_port=master_end_port,
                master_enabled=master_enabled,
                slave_start_port=slave_start_port,
                slave_end_port=slave_end_port,
                slave_enabled=slave_enabled,
                ssd_count=ssd_count)
        logger.info('Save config.yaml from redis.properties')

    def _get_cluster_ids_from_fb():
        cluster_id = config.get_cur_cluster_id()
        path_of_fb = config.get_path_of_fb(cluster_id)
        base_directory = path_of_fb['base_directory']
        dirs = [
            f for f in os.listdir(base_directory)
            if not os.path.isfile(os.path.join(base_directory, f))
        ]
        cluster_ids = [d.split('_')[1] for d in dirs if 'cluster_' in d]
        return cluster_ids

    cluster_ids = _get_cluster_ids_from_fb()
    root_of_cli_config = get_root_of_cli_config()
    rp_exists = []
    rp_not_exists = []
    dest_folder_exists = []
    meta = [['cluster_id', 'state']]
    for cluster_id in cluster_ids:
        path_of_fb = config.get_path_of_fb(cluster_id)
        rp = path_of_fb['redis_properties']
        dest_path = path_join(root_of_cli_config, 'clusters', cluster_id)
        dest_path = path_join(dest_path, 'config.yaml')
        cluster_path = path_of_fb['cluster_path']
        deploy_state = path_join(cluster_path, '.deploy.state')
        pending = DeployUtil().is_pending(cluster_id)
        if os.path.exists(dest_path):
            dest_folder_exists.append(cluster_id)
            meta.append([cluster_id, 'SKIP(dest_exist)'])
        elif os.path.isfile(rp) and not os.path.isfile(deploy_state):
            rp_exists.append(cluster_id)
            meta.append([cluster_id, 'IMPORT'])
        else:
            rp_not_exists.append(cluster_id)
            meta.append([cluster_id, 'SKIP(broken)'])
    logger.info('Diff fb and cli conf folders.')
    utils.print_table(meta)
    if len(rp_exists) == 0:
        return
    import_yes = askBool('Do you want to import conf?', ['y', 'n'])
    if not import_yes:
        return
    _import_from_fb_to_cli_conf(rp_exists)

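# Illustrative run (cluster ids are made up and the exact layout depends on
# utils.print_table): each discovered cluster_* directory is listed with its
# state, and only clusters marked IMPORT are converted by
# _import_from_fb_to_cli_conf() after the confirmation prompt.
#
#   cluster_id | state
#   1          | IMPORT
#   2          | SKIP(dest_exist)
#   5          | SKIP(broken)
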