Example No. 1
# Method of a unit-test class; the @mock.patch decorators that inject
# add_provisioning_step, check_cluster_exists and get_namenode as mocks
# are elided in this excerpt, as are the setUp() attributes self.instance
# and self.remote.
def test_refresh_hadoop_nodes(self, add_provisioning_step,
                              check_cluster_exists, get_namenode):
    cluster = mock.Mock()
    get_namenode.return_value = self.instance
    rs.refresh_hadoop_nodes(cluster)
    get_namenode.assert_called_once_with(cluster)
    self.remote.execute_command.assert_called_once_with(
        'sudo su - -c "hdfs dfsadmin -refreshNodes" hadoop')
Example No. 2
def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    run.refresh_yarn_nodes(cluster)
    config.configure_topology_data(pctx, cluster)
    for instance in instances:
        run.start_instance(instance)
Example No. 3
def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    # the resourcemanager role is optional, so only refresh YARN when present
    if rm:
        run.refresh_yarn_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    run.start_dn_nm_processes(instances)
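This variant guards the YARN refresh because a cluster need not run a
resourcemanager. For symmetry with the HDFS sketch above, refresh_yarn_nodes
plausibly looks like the following (yarn rmadmin -refreshNodes is the
standard Hadoop command; the helpers are assumptions):

def refresh_yarn_nodes(cluster):
    # Plausible counterpart to refresh_hadoop_nodes: tell the
    # resourcemanager to re-read its node include/exclude lists.
    rm = vu.get_resourcemanager(cluster)  # appears in the examples above
    rm.remote().execute_command(
        'sudo su - -c "yarn rmadmin -refreshNodes" hadoop')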
Example No. 4
def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    run.start_all_processes(instances, [])
Example No. 5
def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    run.start_dn_nm_processes(instances)
    swift_helper.install_ssl_certs(instances)
    config.configure_zookeeper(cluster)
    run.refresh_zk_servers(cluster)
Example No. 6
def decommission_nodes(pctx, cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    _update_exclude_files(cluster, instances)

    run.refresh_hadoop_nodes(cluster)
    run.refresh_yarn_nodes(cluster)

    _check_nodemanagers_decommission(cluster, nodemanagers)
    _check_datanodes_decommission(cluster, datanodes)

    _update_include_files(cluster)
    _clear_exclude_files(cluster)

    config.configure_topology_data(pctx, cluster)
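Both decommission variants first split the departing instances by role via
_get_instances_with_service. A plausible sketch, assuming each instance
exposes the process list of its node group:

def _get_instances_with_service(instances, service):
    # Assumed data model: instance.node_group.node_processes names the
    # services the instance runs.
    return [instance for instance in instances
            if service in instance.node_group.node_processes]

The surrounding sequence is Hadoop's standard decommission dance: write the
retiring hostnames into the exclude files, refresh so the daemons start
draining those nodes, wait until they report as decommissioned, then
regenerate the include files and clear the exclude files.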
Example No. 7
def decommission_nodes(pctx, cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    _update_exclude_files(cluster, instances)

    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    _check_nodemanagers_decommission(cluster, nodemanagers)
    _check_datanodes_decommission(cluster, datanodes)

    _update_include_files(cluster, instances)
    _clear_exclude_files(cluster)
    # refresh once more so the daemons re-read the now-empty exclude files
    run.refresh_hadoop_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
Example No. 8
def decommission_nodes(pctx, cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    _update_exclude_files(cluster, instances)

    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    _check_nodemanagers_decommission(cluster, nodemanagers)
    _check_datanodes_decommission(cluster, datanodes)

    _update_include_files(cluster, instances)
    _clear_exclude_files(cluster)
    run.refresh_hadoop_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    config.configure_zookeeper(cluster, instances)
    # TODO(shuyingya): find a way to restart the leader node last
    run.refresh_zk_servers(cluster, instances)
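Taken together, the scale_cluster and decommission_nodes variants are the two
halves of one cluster-membership workflow. A purely hypothetical caller, only
to show how the pieces relate (every name here is illustrative, not a real
plugin API):

def resize_cluster(pctx, cluster, to_add, to_remove):
    # Hypothetical orchestration, for illustration only.
    if to_remove:
        decommission_nodes(pctx, cluster, to_remove)
    if to_add:
        scale_cluster(pctx, cluster, to_add)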