def test_refresh_yarn_nodes(self, add_provisioning_step,
                            check_cluster_exists, get_resourcemanager):
    # The extra arguments are mocks injected by @mock.patch decorators
    # (omitted from this excerpt).
    cluster = mock.Mock()
    get_resourcemanager.return_value = self.instance

    rs.refresh_yarn_nodes(cluster)

    get_resourcemanager.assert_called_once_with(cluster)
    self.remote.execute_command.assert_called_once_with(
        'sudo su - -c "yarn rmadmin -refreshNodes" hadoop')

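For context, a minimal sketch of the function this test exercises, assuming the vanilla-plugin helper vu.get_resourcemanager and an instance.remote() handle whose execute_command runs a shell command on the node (names assumed from the test's mocks; the real implementation may differ):

def refresh_yarn_nodes(cluster):
    # Ask the ResourceManager to re-read its include/exclude node lists.
    rm = vu.get_resourcemanager(cluster)
    rm.remote().execute_command(
        'sudo su - -c "yarn rmadmin -refreshNodes" hadoop')
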
def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    run.refresh_yarn_nodes(cluster)
    config.configure_topology_data(pctx, cluster)
    for instance in instances:
        run.start_instance(instance)

def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)
    config.configure_topology_data(pctx, cluster)
    run.start_dn_nm_processes(instances)

def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)
    config.configure_topology_data(pctx, cluster)
    run.start_all_processes(instances, [])

def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)

    # The YARN refresh only applies when the cluster actually has a
    # ResourceManager.
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    run.start_dn_nm_processes(instances)
    swift_helper.install_ssl_certs(instances)
    config.configure_zookeeper(cluster)
    run.refresh_zk_servers(cluster)

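The rm guard introduced in the later variants relies on the lookup helper returning None for clusters without YARN. A plausible sketch, assuming a generic u.get_instance(cluster, process_name) utility that returns the single instance running a process or None:

def get_resourcemanager(cluster):
    # One instance runs the 'resourcemanager' process, or None if
    # no node group includes it.
    return u.get_instance(cluster, 'resourcemanager')
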
def decommission_nodes(pctx, cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    _update_exclude_files(cluster, instances)

    run.refresh_hadoop_nodes(cluster)
    run.refresh_yarn_nodes(cluster)

    _check_nodemanagers_decommission(cluster, nodemanagers)
    _check_datanodes_decommission(cluster, datanodes)

    _update_include_files(cluster)
    _clear_exclude_files(cluster)

    config.configure_topology_data(pctx, cluster)

def decommission_nodes(pctx, cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    _update_exclude_files(cluster, instances)

    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    _check_nodemanagers_decommission(cluster, nodemanagers)
    _check_datanodes_decommission(cluster, datanodes)

    _update_include_files(cluster, instances)
    _clear_exclude_files(cluster)
    run.refresh_hadoop_nodes(cluster)

    config.configure_topology_data(pctx, cluster)

def decommission_nodes(pctx, cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    _update_exclude_files(cluster, instances)

    # Tell HDFS and YARN to stop scheduling work on the excluded nodes.
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    # Wait until the NodeManagers and DataNodes have fully decommissioned.
    _check_nodemanagers_decommission(cluster, nodemanagers)
    _check_datanodes_decommission(cluster, datanodes)

    _update_include_files(cluster, instances)
    _clear_exclude_files(cluster)
    run.refresh_hadoop_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    config.configure_zookeeper(cluster, instances)
    # TODO(shuyingya): should invent a way to restart the leader node last
    run.refresh_zk_servers(cluster, instances)
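Both decommission variants first filter the affected instances by process name before waiting on decommission. A minimal sketch of that helper, assuming each instance's node group exposes a node_processes list naming the Hadoop processes it runs:

def _get_instances_with_service(instances, service):
    # Keep only the instances whose node group runs the given process.
    return [instance for instance in instances
            if service in instance.node_group.node_processes]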