Code Example #1
import re  # used by the status regex below


# Note: `u` is the Sahara plugin utilities module imported elsewhere in
# the source file this example was taken from.
def get_datanodes_status(cluster):
    """Map each datanode hostname to its lower-cased decommission status,
    parsed from the output of `hdfs dfsadmin -report`."""
    statuses = {}
    namenode = u.get_namenode(cluster)
    status_regexp = r'^Hostname: (.*)\nDecommission Status : (.*)$'
    matcher = re.compile(status_regexp, re.MULTILINE)
    # execute_command returns (exit_code, stdout); [1] keeps the report text.
    dfs_report = namenode.remote().execute_command(
        'sudo su - -c "hdfs dfsadmin -report" hadoop')[1]

    for host, status in matcher.findall(dfs_report):
        statuses[host] = status.lower()

    return statuses
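
A minimal usage sketch, assuming a hypothetical helper name `_all_decommissioned` (not Sahara API): the mapping returned above can be polled until every host being removed reports a finished decommission.

def _all_decommissioned(cluster, hostnames):
    # Hypothetical helper: the dfsadmin report shows "Decommissioned"
    # once a node is done; get_datanodes_status lower-cases the value.
    statuses = get_datanodes_status(cluster)
    return all(statuses.get(h) == 'decommissioned' for h in hostnames)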
Code Example #2
def await_datanodes(cluster):
    """Block until all provisioned datanodes have started, or time out."""
    datanodes_count = len(vu.get_datanodes(cluster))
    if datanodes_count < 1:
        return

    # `_` is the i18n translation marker. The poll below re-invokes
    # _check_datanodes_count with the kwargs dict until it returns True
    # or the DATANODES_STARTUP_TIMEOUT plugin option expires.
    l_message = _("Waiting on %s datanodes to start up") % datanodes_count
    with vu.get_namenode(cluster).remote() as r:
        poll_utils.plugin_option_poll(cluster, _check_datanodes_count,
                                      c_helper.DATANODES_STARTUP_TIMEOUT,
                                      l_message, 1, {
                                          'remote': r,
                                          'count': datanodes_count
                                      })
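
plugin_option_poll repeatedly calls the given predicate with the kwargs dict until it returns True or the timeout option expires. A minimal sketch of such a predicate (an assumption; the real _check_datanodes_count lives elsewhere in the plugin and may differ), counting "Hostname:" lines in the dfsadmin report, one per live datanode (`re` is imported in Code Example #1):

def _check_datanodes_count(remote, count):
    # Sketch only: each live datanode contributes one "Hostname:" line
    # to the `hdfs dfsadmin -report` output.
    stdout = remote.execute_command(
        'sudo su - -c "hdfs dfsadmin -report" hadoop')[1]
    return len(re.findall(r'^Hostname:', stdout, re.MULTILINE)) == count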
Code Example #3
    def _set_cluster_info(self, cluster):
        """Collect the web UI / RPC endpoints of every running Hadoop
        service and persist them in the cluster's `info` field."""
        nn = vu.get_namenode(cluster)
        rm = vu.get_resourcemanager(cluster)
        hs = vu.get_historyserver(cluster)
        oo = vu.get_oozie(cluster)
        sp = vu.get_spark_history_server(cluster)
        sp_master = vu.get_spark_master(cluster)

        info = {}

        if rm:
            info['YARN'] = {
                'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
                'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
            }

        if nn:
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
            }

        if oo:
            info['JobFlow'] = {
                'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
            }

        if hs:
            info['MapReduce JobHistory Server'] = {
                'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
            }

        if sp:
            info['Apache Spark'] = {
                'Spark UI':
                'http://%s:%s' % (sp.management_ip, '4040'),
                'Spark History Server UI':
                'http://%s:%s' % (sp.management_ip, '18080')
            }

        if sp_master:
            info['Spark'] = {
                'Web UI': 'http://%s:%s' % (sp_master.management_ip, '8080')
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
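
For illustration, the persisted `info` mapping for a cluster running only a resourcemanager and a namenode would look like this (addresses and hostname are made up):

{
    'YARN': {
        'Web UI': 'http://10.0.0.5:8088',
        'ResourceManager': 'http://10.0.0.5:8032',
    },
    'HDFS': {
        'Web UI': 'http://10.0.0.4:50070',
        'NameNode': 'hdfs://master-0001:9000',
    },
}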
Code Example #4
def start_namenode(cluster, backup=None):
    """Start the cluster's namenode, forwarding `backup` to the
    private helper."""
    nn = vu.get_namenode(cluster)
    _start_namenode(nn, backup)
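
The private delegate is not shown in this example; a hedged sketch of what it might do (an assumption, not the actual Sahara implementation; the command name follows Hadoop 2.x conventions):

def _start_namenode(nn, backup=None):
    # Sketch only: the real helper may also format HDFS on first boot
    # or restore state when `backup` is given.
    with nn.remote() as r:
        r.execute_command(
            'sudo su - -c "hadoop-daemon.sh start namenode" hadoop')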
Code Example #5
def refresh_hadoop_nodes(cluster):
    """Make the namenode re-read its include/exclude files
    (`hdfs dfsadmin -refreshNodes`), e.g. after scaling the cluster."""
    nn = vu.get_namenode(cluster)
    nn.remote().execute_command(
        'sudo su - -c "hdfs dfsadmin -refreshNodes" hadoop')
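
A typical call site, sketched with a hypothetical helper: during scale-down, the instances to remove are first written to the namenode's exclude file, then the refresh makes the namenode pick the change up.

def decommission_datanodes(cluster, instances):
    update_exclude_files(cluster, instances)  # hypothetical helper
    refresh_hadoop_nodes(cluster)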