def scale_cluster(cluster, instances):
    """Provision and start Hadoop roles on newly added cluster instances.

    :param cluster: the cluster being scaled.
    :param instances: newly provisioned instances to bring into the
        cluster; no-op when empty.
    """
    if not instances:
        return

    # If CDH is not pre-baked into the image, prepare the OS and install
    # the required packages first.
    # NOTE(review): probing only instances[0] assumes all new instances
    # use the same image — confirm against the provisioning path.
    if not cmd.is_pre_installed_cdh(instances[0].remote()):
        _configure_os(instances)
        _install_packages(instances, PACKAGES)

    _start_cloudera_agents(instances)
    _await_agents(instances)

    for instance in instances:
        _configure_instance(instance)
        cu.update_configs(instance)

        # Refresh HDFS topology before (re)configuring swift so the new
        # datanode is known to the namenode.
        if 'DATANODE' in instance.node_group.node_processes:
            cu.refresh_nodes(cluster, 'DATANODE', cu.HDFS_SERVICE_NAME)

        _configure_swift_to_inst(instance)

        # Start the HDFS datanode role on this instance, if any.
        if 'DATANODE' in instance.node_group.node_processes:
            hdfs = cu.get_service('DATANODE', instance=instance)
            cu.start_roles(hdfs, cu.get_role_name(instance, 'DATANODE'))

        # Start the YARN nodemanager role on this instance, if any.
        if 'NODEMANAGER' in instance.node_group.node_processes:
            yarn = cu.get_service('NODEMANAGER', instance=instance)
            cu.start_roles(yarn, cu.get_role_name(instance, 'NODEMANAGER'))
def decommission_cluster(cluster, instances):
    """Decommission Hadoop roles and remove instances from the cluster.

    Collects the DATANODE and NODEMANAGER role names hosted on the given
    instances, decommissions each non-empty group, deletes the instances
    from Cloudera Manager, then refreshes both services so the remaining
    nodes pick up the new membership.

    :param cluster: the cluster being scaled down.
    :param instances: instances being removed from the cluster.
    """
    dns = []
    nms = []
    for i in instances:
        if 'DATANODE' in i.node_group.node_processes:
            dns.append(cu.get_role_name(i, 'DATANODE'))
        if 'NODEMANAGER' in i.node_group.node_processes:
            nms.append(cu.get_role_name(i, 'NODEMANAGER'))

    if dns:
        cu.decommission_nodes(cluster, 'DATANODE', dns)
    if nms:
        cu.decommission_nodes(cluster, 'NODEMANAGER', nms)

    cu.delete_instances(cluster, instances)

    # Refresh both services so live nodes see the reduced membership.
    cu.refresh_nodes(cluster, 'DATANODE', cu.HDFS_SERVICE_NAME)
    cu.refresh_nodes(cluster, 'NODEMANAGER', cu.YARN_SERVICE_NAME)