Example #1
def copy_dir_from_remote(client, remote_path, local_path):
    """copy directory from remote to local

    if already file exist, overwrite file
    copy all files recursively
    directory must exist

    :param client: SSHClient
    :param remote_path: absolute path of file
    :param local_path: absolute path of file
    """
    logger.debug('copy FROM node:{} TO localhost:{}'.format(
        remote_path,
        local_path
    ))
    sftp = get_sftp(client)
    listdir = sftp.listdir(remote_path)
    for f in listdir:
        r_path = os.path.join(remote_path, f)
        l_path = os.path.join(local_path, f)
        if is_dir(client, r_path):
            if not os.path.exists(l_path):
                os.mkdir(l_path)
            copy_dir_from_remote(client, r_path, l_path)
        else:
            sftp.get(r_path, l_path)
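A minimal usage sketch, assuming a connected paramiko SSHClient (the host and paths below are hypothetical):

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('node1.example.com', username='deploy')
# /tmp/conf must already exist locally, per the docstring
copy_dir_from_remote(client, '/opt/app/conf', '/tmp/conf')
client.close()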
Example #2
    def find_noaddr(self):
        """Find noaddr nodes that is not used anymore in cluster
        """
        center = Center()
        center.update_ip_port()
        logger.debug('find_noaddr')
        ret = RedisCliUtil.command_all_async('cluster nodes', slave=True)
        outs = ''
        meta = []
        for _, host, port, res, stdout in ret:
            if res == 'OK':
                outs = '\n'.join([outs, stdout])
            else:
                logger.warning("FAIL {}:{} {}".format(host, port, stdout))
        # collect output from all nodes first, then filter for noaddr entries,
        # so filtered_lines is defined even when some nodes fail
        filtered_lines = filter(lambda x: 'noaddr' in x, outs.splitlines())

        total_list = []
        for line in filtered_lines:
            total_list.append(line.split()[0])

        # Remove duplicates
        unique_list = list(set(total_list))
        for uuid in unique_list:
            meta.append([uuid])
        utils.print_table([['UUID']] + meta)
Example #3
    def masters_with_dir(self, server, dir):
        """Find masters that use the specified directory path

        :param server: IP or hostname
        :param dir: directory path
        """
        center = Center()
        center.update_ip_port()
        logger.debug('masters_with_dir')
        master_nodes = center.get_master_obj_list()
        ret = RedisCliUtil.command_all_async('config get dir', slave=True)
        meta = []
        m_endpoint = [node['addr'] for node in master_nodes]
        for _, host, port, res, stdout in ret:
            if res == 'OK':
                # 'config get dir' prints the key on one line and the
                # data directory path on the next
                lines = stdout.splitlines()
                data_dir = lines[1]
                if self.compare_ip(host, server) and dir in data_dir:
                    endpoint = '{}:{}'.format(socket.gethostbyname(host), port)
                    if endpoint in m_endpoint:
                        meta.append([host, port, data_dir])
            else:
                logger.warning("FAIL {}:{} {}".format(host, port, stdout))
        utils.print_table([['HOST', 'PORT', 'PATH']] + meta)
Example #4
def convert_list_2_hyphen(ports):
    '''Convert a list of ports into hyphenated range strings, e.g.
    [1, 2, 3, 5, 7, 8, 10]
    =>
    ['1-3', '5', '7-8', '10']
    '''
    logger.debug('ports: {}'.format(ports))
    ret = []
    try:
        s = ports[0]
        pre = ports[0] - 1
        for port in ports:
            if pre != port - 1:
                if s != pre:
                    ret.append('{}-{}'.format(s, pre))
                else:
                    ret.append(str(s))
                s = port
            pre = port
        if s != pre:
            ret.append('{}-{}'.format(s, pre))
        else:
            ret.append(str(s))
        logger.debug('converted: {}'.format(ret))
        return ret
    except Exception:
        raise ConvertError("Invalid ports: '{}'".format(ports))
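For example:

ports = [18100, 18101, 18102, 18104]
print(convert_list_2_hyphen(ports))  # ['18100-18102', '18104']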
Example #5
def ping(host, duration=3):
    # note: '-t' is a timeout in seconds on BSD/macOS ping, but a TTL on Linux
    command = 'ping -c 1 -t {} {} > /dev/null 2>&1'.format(duration, host)
    response = os.system(command)
    print(response)
    logger.debug('ping to {}, response: {}'.format(host, response))
    if response != 0:
        raise HostConnectionError(host)
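A more portable variant could use subprocess instead of shell redirection; this is a sketch, not the project's API:

import subprocess

def ping_portable(host, count=1):
    # returncode is 0 only when the host replied
    result = subprocess.run(['ping', '-c', str(count), host],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    if result.returncode != 0:
        raise HostConnectionError(host)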
Example #6
    def beeline(self, **kargs):
        """Connect to thriftserver command line
        """
        logger.debug('thriftserver_command_beeline')
        _check_spark()
        cluster_id = config.get_cur_cluster_id()
        path_of_fb = config.get_path_of_fb(cluster_id)
        ths_props_path = path_of_fb['thrift_properties']
        cmd = 'source {}; echo ${}'.format(ths_props_path, 'HIVE_HOST')
        host = sp.check_output(cmd, shell=True).decode('utf-8').strip()
        cmd = 'source {}; echo ${}'.format(ths_props_path, 'HIVE_PORT')
        port = sp.check_output(cmd, shell=True).decode('utf-8').strip()
        spark_env = _get_env()
        base_cmd = '{}/beeline'.format(spark_env['spark_bin'])
        options = {
            'u': 'jdbc:hive2://{}:{}'.format(host, port),
            'n': os.environ['USER']
        }
        for key, value in kargs.items():
            options[key] = value
        for key, value in options.items():
            base_cmd += ' -{} {}'.format(key, value)
        logger.debug(base_cmd)
        msg = message.get('try_connection')
        logger.info(msg)
        os.system(base_cmd)
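Any extra keyword arguments are forwarded as beeline flags, e.g. (hypothetical flag and value):

beeline(p='secret')
# runs: <spark_bin>/beeline -u jdbc:hive2://<host>:<port> -n <user> -p secret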
Example #7
def copy_dir_to_remote(client, local_path, remote_path):
    """copy directory from local to remote
    if already file exist, overwrite file
    copy all files recursively
    directory must exist

    :param client: SSHClient
    :param local_path: absolute path of file
    :param remote_path: absolute path of file
    """
    logger.debug('copy_dir_to_remote')
    logger.debug('copy FROM localhost:{} TO node:{}'.format(
        local_path,
        remote_path
    ))
    sftp = get_sftp(client)
    listdir = os.listdir(local_path)
    for f in listdir:
        r_path = os.path.join(remote_path, f)
        l_path = os.path.join(local_path, f)
        if os.path.isdir(l_path):
            if not is_exist(client, r_path):
                sftp.mkdir(r_path)
            copy_dir_to_remote(client, l_path, r_path)
        else:
            sftp.put(l_path, r_path)
Example #8
    def failover_with_dir(self, server, dir):
        """Find masters that use the specified directory path and do failover with its slave

        :param server: IP or hostname
        :param dir: directory path
        """
        center = Center()
        center.update_ip_port()
        logger.debug('failover_with_dir')
        master_nodes = center.get_master_obj_list()
        cluster_id = config.get_cur_cluster_id()
        lib_path = config.get_ld_library_path(cluster_id)
        path_of_fb = config.get_path_of_fb(cluster_id)
        sr2_redis_bin = path_of_fb['sr2_redis_bin']
        env_cmd = [
            'GLOBIGNORE=*;',
            'export LD_LIBRARY_PATH={};'.format(lib_path['ld_library_path']),
            'export DYLD_LIBRARY_PATH={};'.format(
                lib_path['dyld_library_path']),
        ]
        redis_cli_cmd = os.path.join(sr2_redis_bin, 'redis-cli')

        # Find masters with dir
        ret = RedisCliUtil.command_all_async('config get dir', slave=True)
        meta = []
        m_endpoint = [node['addr'] for node in master_nodes]
        for _, host, port, res, stdout in ret:
            if res == 'OK':
                # 'config get dir' prints the key on one line and the
                # data directory path on the next
                lines = stdout.splitlines()
                data_dir = lines[1]
                if self.compare_ip(host, server) and dir in data_dir:
                    endpoint = '{}:{}'.format(socket.gethostbyname(host), port)
                    if endpoint in m_endpoint:
                        meta.append(endpoint)
            else:
                logger.warning("FAIL {}:{} {}".format(host, port, stdout))

        for endpoint in meta:
            for master_node in master_nodes:
                if endpoint == master_node['addr']:
                    for slave_node in master_node['slaves']:
                        addr = slave_node['addr']
                        (s_host, s_port) = addr.split(':')
                        sub_cmd = 'cluster failover takeover'
                        command = '{} {} -h {} -p {} {}'.format(
                            ' '.join(env_cmd),
                            redis_cli_cmd,
                            s_host,
                            s_port,
                            sub_cmd,
                        )
                        self._print(
                            message.get('try_failover_takeover').format(
                                slave=addr))
                        stdout = subprocess.check_output(command, shell=True)
                        self._print(stdout.decode('utf-8'))
Example #9
    def start(self, profile=False, master=True, slave=True):
        """Start cluster

        :param master: set False to exclude master nodes from the start
        :param slave: set False to exclude slave nodes from the start
        """
        logger.debug("command 'cluster start'")
        if not isinstance(profile, bool):
            msg = message.get('error_option_type_not_boolean')
            msg = msg.format(option='profile')
            logger.error(msg)
            return
        if not isinstance(master, bool):
            msg = message.get('error_option_type_not_boolean')
            msg = msg.format(option='master')
            logger.error(msg)
            return
        if not isinstance(slave, bool):
            msg = message.get('error_option_type_not_boolean')
            msg = msg.format(option='slave')
            logger.error(msg)
            return
        center = Center()
        center.update_ip_port()
        success = center.check_hosts_connection()
        if not success:
            return
        center.ensure_cluster_exist()
        if master:
            master_alive_count = center.get_alive_master_redis_count()
            master_alive_count_mine = center.get_alive_master_redis_count(
                check_owner=True
            )
            not_mine_count = master_alive_count - master_alive_count_mine
            if not_mine_count > 0:
                msg = message.get('error_cluster_start_master_collision')
                msg = '\n'.join(msg).format(count=not_mine_count)
                raise LightningDBError(11, msg)
        if slave:
            slave_alive_count = center.get_alive_slave_redis_count()
            slave_alive_count_mine = center.get_alive_slave_redis_count(
                check_owner=True
            )
            not_mine_count = slave_alive_count - slave_alive_count_mine
            if not_mine_count > 0:
                msg = message.get('error_cluster_start_slave_collision')
                msg = '\n'.join(msg).format(count=not_mine_count)
                raise LightningDBError(12, msg)
        center.backup_server_logs(master=master, slave=slave)
        center.create_redis_data_directory()

        # equal to cluster.configure()
        center.configure_redis()
        center.sync_conf(show_result=True)

        center.start_redis_process(profile, master=master, slave=slave)
        center.wait_until_all_redis_process_up(master=master, slave=slave)
Example #10
    def command_all_async(sub_cmd, slave=True):
        def _async_target_func(m_s, pre_cmd, host, port, sub_cmd, ret):
            try:
                command = '{} -h {} -p {} {}'.format(pre_cmd, host, port,
                                                     sub_cmd)
                logger.debug(command)
                stdout = subprocess.check_output(command, shell=True)
                stdout = stdout.decode('utf-8').strip()
                ret.append((m_s, host, port, 'OK', stdout))
            except Exception as ex:
                stderr = str(ex)
                logger.debug(stderr)
                ret.append((m_s, host, port, 'FAIL', stderr))

        cluster_id = config.get_cur_cluster_id()
        master_host_list = config.get_master_host_list(cluster_id)
        master_port_list = config.get_master_port_list(cluster_id)
        if slave:
            slave_host_list = config.get_slave_host_list(cluster_id)
            slave_port_list = config.get_slave_port_list(cluster_id)
        path_of_fb = config.get_path_of_fb(cluster_id)
        sr2_redis_bin = path_of_fb['sr2_redis_bin']

        logger.debug('command_all_async')
        lib_path = config.get_ld_library_path(cluster_id)
        env_cmd = [
            'export LD_LIBRARY_PATH={};'.format(lib_path['ld_library_path']),
            'export DYLD_LIBRARY_PATH={};'.format(
                lib_path['dyld_library_path']),
        ]
        env = ' '.join(env_cmd)
        threads = []
        ret = []  # (m/s, host, port, result, message)
        pre_cmd = '{} {}/redis-cli -c'.format(env, sr2_redis_bin)
        for host in master_host_list:
            for port in master_port_list:
                t = Thread(
                    target=_async_target_func,
                    args=('Master', pre_cmd, host, port, sub_cmd, ret),
                )
                threads.append(t)
        if slave:
            for host in slave_host_list:
                for port in slave_port_list:
                    t = Thread(
                        target=_async_target_func,
                        args=('Slave', pre_cmd, host, port, sub_cmd, ret),
                    )
                    threads.append(t)
        for th in threads:
            th.start()
            time.sleep(0.02)
        for th in threads:
            th.join()
        logger.debug(ret)
        return ret
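The returned tuples are (role, host, port, result, message), consumed as in the other examples:

ret = RedisCliUtil.command_all_async('info server', slave=True)
for role, host, port, res, stdout in ret:
    if res == 'OK':
        logger.debug('{} {}:{} replied'.format(role, host, port))
    else:
        logger.warning("FAIL {}:{} {}".format(host, port, stdout))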
Example #11
    def rowcount(self):
        """Query and show cluster row count
        """
        logger.debug('rowcount')

        masters = []
        center = Center()
        center.update_ip_port()
        master_nodes = center.get_master_obj_list()
        for master_node in master_nodes:
            node = master_node['addr']
            masters.append(node)

        # open-redis-cli-all info Tablespace | grep totalRows | awk -F ',
        # ' '{print $4}' | awk -F '=' '{sum += $2} END {print sum}'
        ret = RedisCliUtil.command_all_async('info Tablespace', slave=True)
        outs = ''
        for _, host, port, res, stdout in ret:
            if res == 'OK':
                endpoint = '{}:{}'.format(host, port)
                if endpoint in masters:
                    outs = '\n'.join([outs, stdout])
            else:
                logger.warning("FAIL {}:{} {}".format(host, port, stdout))
        lines = outs.splitlines()

        key = 'totalRows'
        partitions = 'partitions'
        evictions = 'evictedRows'
        # materialize the filter as a list so it can be scanned more than
        # once (a bare filter object is exhausted after one pass in Python 3)
        filtered_lines = [line for line in lines if key in line]

        # Table list
        table_list = []
        result = []
        for line in filtered_lines:
            table_stats, _ = line.split(':')
            table_id = table_stats.split('_')[1]
            if table_id not in table_list:
                table_list.append(table_id)

        for tid in table_list:
            table_lines = [line for line in filtered_lines if tid in line]
            ld = RedisCliUtil.to_list_of_dict(table_lines)
            row_count = reduce(lambda x, y: x + int(y[key]), ld, 0)
            partitions_count = reduce(
                lambda x, y: x + int(y[partitions]), ld, 0)
            evictions_count = reduce(lambda x, y: x + int(y[evictions]), ld, 0)
            result.append([tid, row_count, partitions_count, evictions_count])

        utils.print_table(
            [['Table_ID', 'ROW_COUNT', 'PARTITION_COUNT', 'EVICTED_ROWS']] +
            result)
Example #12
def base_directory(default='~/tsr2'):
    logger.debug('ask base directory')
    result = ask(message.get('ask_base_directory'), default=default)
    if not result.startswith(('~', '/')):
        logger.error(message.get('error_invalid_path').format(value=result))
        return base_directory(default)
    logger.info('OK, {}'.format(result))
    cli_config = config.get_cli_config()
    cli_config['base_directory'] = result
    config.save_cli_config(cli_config)
    return result
Example #13
def get_sftp(client):
    """Open sftp

    :param client: SSHClient instance
    :return: opened sftp instance
    """
    try:
        return client.open_sftp()
    except Exception as e:
        # the error is swallowed; callers receive None when opening fails
        logger.debug(e)
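Typical use with paramiko (hypothetical host), remembering that get_sftp returns None on failure:

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('node1.example.com', username='deploy')
sftp = get_sftp(client)
if sftp:
    print(sftp.listdir('.'))
    sftp.close()
client.close()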
Example #14
def prefix_of_db_path(save, default=None):
    logger.debug('ask_prefix_of_db_path')
    deploy_history = config.get_deploy_history()
    if not default:
        default = deploy_history['prefix_of_db_path']
    result = ask(message.get('ask_db_path'), default=default)
    result = result.strip()
    if save:
        deploy_history['prefix_of_db_path'] = result
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(result))
    return result
Example #15
def _async_target_func(m_s, pre_cmd, host, port, sub_cmd, ret):
    try:
        command = '{} -h {} -p {} {}'.format(pre_cmd, host, port, sub_cmd)
        logger.debug(command)
        stdout = subprocess.check_output(command, shell=True)
        stdout = stdout.decode('utf-8').strip()
        ret.append((m_s, host, port, 'OK', stdout))
    except Exception as ex:
        stderr = str(ex)
        logger.debug(stderr)
        ret.append((m_s, host, port, 'FAIL', stderr))
Example #16
def hosts(save, default=None):
    logger.debug('ask host')
    deploy_history = config.get_deploy_history()
    if not default:
        default = deploy_history['hosts']
    msg = message.get('ask_hosts')
    result = ask(msg, default=', '.join(default))
    result = list(map(lambda x: x.strip(), result.split(',')))
    if save:
        deploy_history['hosts'] = result
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(result))
    return result
Example #17
    def stop(self):
        """Stop thriftserver
        """
        logger.debug('thriftserver_command_stop')
        _check_spark()
        cluster_id = config.get_cur_cluster_id()
        path_of_fb = config.get_path_of_fb(cluster_id)
        ths_props_path = path_of_fb['thrift_properties']
        source_cmd = 'source {}'.format(ths_props_path)
        base_cmd = '$SPARK_SBIN/stop-thriftserver.sh'
        cmd = '{}; {}'.format(source_cmd, base_cmd)
        logger.debug(cmd)
        os.system(cmd)
Example #18
def ssd_count(save, default=None):
    logger.debug('ask ssd count')
    deploy_history = config.get_deploy_history()
    if not default:
        default = deploy_history['ssd_count']
    result = int(askInt(message.get('ask_ssd_count'), default=str(default)))
    if result <= 0:
        logger.error(message.get('error_ssd_count_less_than_1'))
        return ssd_count(save=save, default=default)
    if save:
        deploy_history['ssd_count'] = result
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(result))
    return result
Example #19
def replicas(save, default=None):
    logger.debug('ask replicas')
    deploy_history = config.get_deploy_history()
    if not default:
        default = deploy_history['replicas']
    result = askInt(message.get('ask_replicas'), default=str(default))
    result = int(result)
    if result < 0:
        logger.error(message.get('error_replicas_less_than_0'))
        return replicas(save, default=default)
    if save:
        deploy_history['replicas'] = result
        config.save_deploy_history(deploy_history)
    logger.info('OK, {}'.format(result))
    return result
Example #20
def _save_config(f, key, value):
    inplace_count = 0
    for line in fileinput.input(f, inplace=True):
        words = line.split()
        if words and words[0] == key:
            msg = '{key} {value}'.format(key=key, value=value)
            inplace_count += 1
            print(msg)
        else:
            print(line, end='')
    logger.debug('inplace: %d (%s)' % (inplace_count, f))
    if inplace_count == 1:
        logger.debug('save config(%s) success' % f)
    else:
        msg = message.get('error_save_config').format(key=key, file=f)
        logger.warning(msg)
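For instance, on a redis.conf-style file (hypothetical contents):

with open('/tmp/test.conf', 'w') as conf:
    conf.write('maxmemory 1gb\nappendonly no\n')
_save_config('/tmp/test.conf', 'maxmemory', '2gb')
# /tmp/test.conf now contains 'maxmemory 2gb' and 'appendonly no'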
Example #21
def run_deploy(cluster_id=None,
               history_save=True,
               clean=False,
               strategy="none"):
    """Install LightningDB package.

    :param cluster_id: cluster id
    :param history_save: save input history and use as default
    :param clean: delete redis logs and node configuration
    :param strategy:
        none(default): normal deploy,
        zero-downtime: re-deploy without stop
    """
    # validate cluster id
    if cluster_id is None:
        cluster_id = config.get_cur_cluster_id(allow_empty_id=True)
        if cluster_id < 0:
            msg = message.get('error_invalid_cluster_on_deploy')
            logger.error(msg)
            return
    if not cluster_util.validate_id(cluster_id):
        raise ClusterIdError(cluster_id)

    # validate option
    if not isinstance(history_save, bool):
        msg = message.get('error_option_type_not_boolean')
        msg = msg.format(option='history-save')
        logger.error(msg)
        return
    logger.debug("option '--history-save': {}".format(history_save))
    if not isinstance(clean, bool):
        msg = message.get('error_option_type_not_boolean')
        msg = msg.format(option='clean')
        logger.error(msg)
        return
    logger.debug("option '--clean': {}".format(clean))
    strategy_list = ["none", "zero-downtime"]
    if strategy not in strategy_list:
        msg = message.get('error_deploy_strategy').format(value=strategy,
                                                          list=strategy_list)
        logger.error(msg)
        return
    if strategy == "zero-downtime":
        run_cluster_use(cluster_id)
        _deploy_zero_downtime(cluster_id)
        return
    _deploy(cluster_id, history_save, clean)
Example #22
def main(cluster_id, debug, version):
    if version:
        print_version()
        return
    _initial_check()
    if debug:
        log.set_mode('debug')

    logger.debug('Start ltcli')

    cluster_id = _validate_cluster_id(cluster_id)

    history = os.path.join(config.get_root_of_cli_config(), 'cli_history')
    session = PromptSession(
        # lexer=PygmentsLexer(SqlLexer),
        history=FileHistory(history),
        auto_suggest=AutoSuggestFromHistory(),
        style=utils.style)
    while True:
        try:
            exit_flg = False
            p = prompt.get_cli_prompt()
            text = session.prompt(p, style=utils.style)
            command_list = text.split(';')
            for cmd in command_list:
                cmd = cmd.strip()
                if cmd == "exit":
                    exit_flg = True
                    break
                if 'ltcli' in cmd:
                    old = cmd
                    cmd = cmd.replace('ltcli', '').strip()
                    msg = message.get('notify_command_replacement_is_possible')
                    msg = msg.format(new=cmd, old=old)
                    logger.info(msg)
                err_flg = _handle(cmd)
                if err_flg:
                    break
            if exit_flg:
                break
        except ClusterNotExistError:
            run_cluster_use(-1)
            continue
        except KeyboardInterrupt:
            continue
        except EOFError:
            break
Example #23
    def install(self, host, cluster_id, name):
        logger.debug('Deploy cluster {} at {}...'.format(cluster_id, host))
        path_of_fb = config.get_path_of_fb(cluster_id)
        release_path = path_of_fb['release_path']
        cluster_path = path_of_fb['cluster_path']
        if '/' in name:
            name = os.path.basename(name)
        installer_path = os.path.join(release_path, name)
        command = '''chmod 755 {0}; \
            PATH=${{PATH}}:/usr/sbin; \
            {0} --full {1}'''.format(installer_path, cluster_path)
        client = net.get_ssh(host)
        if not net.is_exist(client, installer_path):
            raise FileNotExistError(installer_path, host=host)
        net.ssh_execute(client=client, command=command)
        client.close()
        logger.debug('OK')
Example #24
    def force_failover(self, server):
        """ Find all masters on the server and convert them to slaves. Finally, in the server, only slaves will be remained.

        :param server: IP or hostname
        """

        logger.debug('force_failover')
        center = Center()
        center.update_ip_port()
        master_nodes = center.get_master_obj_list()
        cluster_id = config.get_cur_cluster_id()
        lib_path = config.get_ld_library_path(cluster_id)
        path_of_fb = config.get_path_of_fb(cluster_id)
        sr2_redis_bin = path_of_fb['sr2_redis_bin']
        env_cmd = [
            'GLOBIGNORE=*;',
            'export LD_LIBRARY_PATH={};'.format(lib_path['ld_library_path']),
            'export DYLD_LIBRARY_PATH={};'.format(
                lib_path['dyld_library_path']),
        ]
        redis_cli_cmd = os.path.join(sr2_redis_bin, 'redis-cli')

        for node in master_nodes:
            addr = node['addr']
            (host, port) = addr.split(':')
            if self.compare_ip(host, server):
                for slave_node in node['slaves']:
                    addr = slave_node['addr']
                    (s_host, s_port) = addr.split(':')
                    sub_cmd = 'cluster failover takeover'
                    command = '{} {} -h {} -p {} {}'.format(
                        ' '.join(env_cmd),
                        redis_cli_cmd,
                        s_host,
                        s_port,
                        sub_cmd,
                    )
                    self._print(
                        message.get('try_failover_takeover').format(
                            slave=addr))
                    stdout = subprocess.check_output(command, shell=True)
                    self._print(stdout.decode('utf-8'))
Example #25
    def distribution(self):
        """Check the distribution of all masters and slaves
        """
        center = Center()
        center.update_ip_port()
        logger.debug('distribution')
        ret = RedisCliUtil.command_all_async('cluster nodes', slave=True)
        outs = ''
        for _, host, port, res, stdout in ret:
            if res == 'OK':
                outs = '\n'.join([outs, stdout])
            else:
                logger.warning("FAIL {}:{} {}".format(host, port, stdout))
        # collect all output first, then keep only each node's own line;
        # a list (not a bare filter) can be scanned once per host below
        myself_key = 'myself'
        filtered_lines = [line for line in outs.splitlines()
                          if myself_key in line]

        meta = []
        total_masters = 0
        total_slaves = 0
        for nd in center.master_host_list:
            num_of_masters = 0
            num_of_slaves = 0
            node = socket.gethostbyname(nd)

            host_lines = [line for line in filtered_lines
                          if (node + ':') in line]
            for line in host_lines:
                params = line.split()
                flags = params[2]
                role = flags.split(',')[1]
                if role == 'master':
                    # a master line has 9 fields; the ninth is its slot range
                    if len(params) == 9:
                        num_of_masters += 1
                else:
                    num_of_slaves += 1
            total_masters += num_of_masters
            total_slaves += num_of_slaves
            hostname = '{}({})'.format(socket.gethostbyaddr(node)[0], node)
            meta.append([hostname, num_of_masters, num_of_slaves])

        meta.append(['TOTAL', total_masters, total_slaves])
        utils.print_table([['HOST', 'MASTER', 'SLAVE']] + meta)
Example #26
    def start(self):
        """Start thriftserver
        """
        logger.debug('thriftserver_command_start')
        _check_spark()
        cluster_id = config.get_cur_cluster_id()
        path_of_fb = config.get_path_of_fb(cluster_id)
        ths_props_path = path_of_fb['thrift_properties']
        source_cmd = 'source {}'.format(ths_props_path)
        hive_opts = _get_hive_opts_str()
        base_cmd = '$SPARK_SBIN/start-thriftserver.sh {}'.format(hive_opts)
        cmd = '{}; {}'.format(source_cmd, base_cmd)
        logger.debug(cmd)
        os.system(cmd)
        spark_log = os.path.join(os.environ['SPARK_HOME'], 'logs')
        if _find_files_with_regex(spark_log, ROLLING_LOGFILE_REGEX):
            for file in _find_files_with_regex(spark_log, NOHUP_LOGFILE_REGEX):
                os.remove(os.path.join(spark_log, file))
Example #27
    def reset_distribution(self):
        """ Reset the distribution of masters and slaves with original setting
        """
        center = Center()
        center.update_ip_port()
        logger.debug('reset_distribution')
        cluster_id = config.get_cur_cluster_id()
        lib_path = config.get_ld_library_path(cluster_id)
        path_of_fb = config.get_path_of_fb(cluster_id)
        sr2_redis_bin = path_of_fb['sr2_redis_bin']
        env_cmd = [
            'GLOBIGNORE=*;',
            'export LD_LIBRARY_PATH={};'.format(lib_path['ld_library_path']),
            'export DYLD_LIBRARY_PATH={};'.format(
                lib_path['dyld_library_path']),
        ]
        redis_cli_cmd = os.path.join(sr2_redis_bin, 'redis-cli')
        slave_nodes = center.get_slave_nodes()
        master_ports = center.master_port_list

        for slave_node in slave_nodes:
            (host, port) = slave_node.split(':')
            try:
                value = int(port)
                if value in master_ports:
                    # failover takeover
                    msg = message.get('try_failover_takeover').format(
                        slave=slave_node)
                    self._print(msg)
                    sub_cmd = 'cluster failover takeover'
                    command = '{} {} -h {} -p {} {}'.format(
                        ' '.join(env_cmd),
                        redis_cli_cmd,
                        host,
                        port,
                        sub_cmd,
                    )
                    stdout = subprocess.check_output(command, shell=True)
                    self._print(stdout.decode('utf-8'))
            except ValueError:
                pass
Example #28
File: config.py Project: mnms/LTCLI
def get_props_as_dict(props_path):
    ret = dict()
    with open(props_path, 'r') as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            if line.strip().startswith('#'):
                continue
            p = re.compile(
                r'export [^ \s\t\r\n\v\f]+=(\(.+\)|[^ \s\t\r\n\v\f]+)')
            m = p.search(line)
            if not m:
                continue
            s = m.start()
            e = m.end()
            key, value = line[s:e].replace('export ', '').split('=', 1)
            value = value.strip()
            key = key.lower()
            p = re.compile(r'\(.*\)')
            m = p.match(value)
            try:
                if m:
                    cmd = [
                        'FBCLI_TMP_ENV={}'.format(value), '&&',
                        'echo ${FBCLI_TMP_ENV[@]}'
                    ]
                    cmd = ' '.join(cmd)
                    logger.debug('subprocess cmd: {}'.format(cmd))
                    value = subprocess.check_output(cmd, shell=True)
                    value = to_str(value.strip())
                    logger.debug('subprocess result: {}'.format(value))
                    value = value.split(' ')
                    value = map(lambda x: int(x) if is_number(x) else x, value)
                    value = filter(lambda x: bool(x), value)
                    value = list(value)
                else:
                    cmd = 'echo {}'.format(value)
                    value = subprocess.check_output(cmd, shell=True)
                    value = to_str(value.strip())
                    value = int(value) if is_number(value) else value
                ret[key] = value
            except subprocess.CalledProcessError:
                raise PropsSyntaxError(value, i + 1)
    return ret
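A sketch of the resulting dict for a small properties file, assuming /bin/sh is bash-compatible (the array expansion requires it):

with open('/tmp/tsr2.properties', 'w') as props:
    props.write('export SR2_REDIS_MASTER_PORTS=(18100 18101)\n')
    props.write('export SR2_REDIS_MASTER_HOSTS=127.0.0.1\n')
print(get_props_as_dict('/tmp/tsr2.properties'))
# {'sr2_redis_master_ports': [18100, 18101],
#  'sr2_redis_master_hosts': '127.0.0.1'}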
Example #29
def download_file(url, file_path):
    download_path = file_path + '.download'
    file_name = os.path.basename(file_path)
    try:
        with open(download_path, 'wb') as f:
            msg = message.get('file_download').format(file_name=file_name)
            logger.info(msg)
            logger.debug('url: {}'.format(url))
            logger.debug('installer name: {}'.format(file_name))
            response = requests.get(url, stream=True)
            response.raise_for_status()
            total_length = response.headers.get('content-length')
            if total_length is None:
                f.write(response.content)
            else:
                done_length = 0
                total_length = int(total_length)
                for data in response.iter_content(chunk_size=4096):
                    done_length += len(data)
                    f.write(data)
                    done = int(100 * done_length / total_length)
                    comp = '=' * int(done / 2)
                    remain = ' ' * int(50 - int(done / 2))
                    progress = '\r[{}{}] {}%'.format(comp, remain, done)
                    sys.stdout.write(progress)
                    sys.stdout.flush()
            print('')
            shutil.move(download_path, file_path)
            return True
    except requests.exceptions.HTTPError as ex:
        logger.warning(ex)
        return False
    except KeyboardInterrupt as ex:
        print('')
        raise ex
    except BaseException as ex:
        class_name = ex.__class__.__name__
        logger.warning('{}: {}'.format(class_name, url))
        return False
    finally:
        if os.path.isfile(download_path):
            os.remove(download_path)
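Usage sketch with a hypothetical URL:

ok = download_file('https://example.com/lightningdb-installer.bin',
                   '/tmp/lightningdb-installer.bin')
if not ok:
    logger.error('download failed')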
Example #30
    def rowcount(self):
        """Query and show cluster row count
        """
        logger.debug('rowcount')
        # open-redis-cli-all info Tablespace | grep totalRows | awk -F ',
        # ' '{print $4}' | awk -F '=' '{sum += $2} END {print sum}'
        ret = RedisCliUtil.command_all_async('info Tablespace', slave=False)
        outs = ''
        for _, host, port, res, stdout in ret:
            if res == 'OK':
                outs = '\n'.join([outs, stdout])
            else:
                logger.warning("FAIL {}:{} {}".format(host, port, stdout))
        lines = outs.splitlines()
        key = 'totalRows'
        filtered_lines = (filter(lambda x: key in x, lines))
        ld = RedisCliUtil.to_list_of_dict(filtered_lines)
        # row_count = reduce(lambda x, y: {key: int(x[key]) + int(y[key])}, ld)
        row_count = reduce(lambda x, y: x + int(y[key]), ld, 0)
        self._print(row_count)
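The reduce step sums the totalRows field across the parsed dicts; shown here on hypothetical values:

from functools import reduce

ld = [{'totalRows': '100'}, {'totalRows': '250'}]
row_count = reduce(lambda x, y: x + int(y['totalRows']), ld, 0)
print(row_count)  # 350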