def _check_decommission(cluster, instances, check_func, option):
    utils.plugin_option_poll(cluster, is_decommissioned, option,
                             _("Wait for decommissioning"), 5, {
                                 'cluster': cluster,
                                 'check_func': check_func,
                                 'instances': instances
                             })
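The call above polls is_decommissioned every 5 seconds, passing it the keyword arguments from the trailing dict, until it returns True. That predicate is not included in the snippet; what follows is only a rough sketch of its likely shape, assuming check_func(cluster) returns a mapping from instance FQDN to a role state string (both that contract and the 'decommissioned' value are assumptions, not the plugin's actual API).

def is_decommissioned(cluster, check_func, instances):
    # Sketch only: assume check_func(cluster) yields {fqdn: state_string}.
    statuses = check_func(cluster)
    for instance in instances:
        # Keep polling until every instance slated for removal reports a
        # decommissioned state.
        if statuses.get(instance.fqdn()) != 'decommissioned':
            return False
    return True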
Example no. 2
    def _start_cloudera_manager(self, cluster, timeout_config):
        manager = self.get_manager(cluster)
        with manager.remote() as r:
            cmd.start_cloudera_db(r)
            cmd.start_manager(r)
        u.plugin_option_poll(
            cluster, self._check_cloudera_manager_started, timeout_config,
            _("Await starting Cloudera Manager"), 2, {'manager': manager})
Example no. 3
    def _await_agents(self, cluster, instances, timeout_config):
        api = self.get_api_client(instances[0].cluster)
        utils.plugin_option_poll(cluster,
                                 self._agents_connected, timeout_config,
                                 _("Await Cloudera agents"), 5, {
                                     'instances': instances,
                                     'api': api
                                 })
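The predicate polled here, self._agents_connected(instances, api), is not part of the snippet either. Below is only a sketch under the assumption that the Cloudera Manager API client can list the hosts it currently knows about; the get_all_hosts() call and the hostname attribute are assumptions about that client, not a documented interface.

    def _agents_connected(self, instances, api):
        # Sketch only: treat an agent as connected once its FQDN appears
        # among the hosts reported by the Cloudera Manager API.
        known_hostnames = [host.hostname for host in api.get_all_hosts()]
        for instance in instances:
            if instance.fqdn() not in known_hostnames:
                return False
        return True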
Example no. 4
def await_datanodes(cluster):
    datanodes_count = len(utils.get_instances(cluster, "datanode"))
    if datanodes_count < 1:
        return

    log_msg = _("Waiting on %d DataNodes to start up") % datanodes_count
    with utils.get_instance(cluster, "namenode").remote() as r:
        utils.plugin_option_poll(
            cluster, _check_datanodes_count,
            c_helper.DATANODES_STARTUP_TIMEOUT,
            log_msg, 1, {"remote": r, "count": datanodes_count})
Example no. 5
def await_datanodes(cluster):
    datanodes_count = len(vu.get_datanodes(cluster))
    if datanodes_count < 1:
        return

    l_message = _("Waiting on %s datanodes to start up") % datanodes_count
    with vu.get_namenode(cluster).remote() as r:
        utils.plugin_option_poll(
            cluster, _check_datanodes_count,
            config_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, {
                'remote': r, 'count': datanodes_count})
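Both variants poll _check_datanodes_count once per second with the NameNode remote and the expected number of DataNodes, but the predicate itself is not shown. A minimal sketch, assuming remote.execute_command returns an (exit_code, stdout) pair and that counting live DataNodes through hdfs dfsadmin -report is acceptable; the exact command line and parsing below are illustrative:

def _check_datanodes_count(remote, count):
    # Sketch only: compare the number of live DataNodes reported by HDFS
    # with the expected count.
    if count < 1:
        return True
    exit_code, stdout = remote.execute_command(
        'sudo su -lc "hdfs dfsadmin -report" hadoop | '
        'grep -e "Live datanodes" -e "Datanodes available:" | '
        'grep -o "[0-9][0-9]*" | head -n 1')
    return exit_code == 0 and stdout and int(stdout) == count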
Example no. 6
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with utils.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(utils.get_remote(nn), "dfsadmin")
        context.sleep(3)

        utils.plugin_option_poll(nn.cluster, _is_decommissioned,
                                 c_helper.DECOMMISSIONING_TIMEOUT,
                                 _("Decommission %s") % "DataNodes", 3, {
                                     'r': r,
                                     'inst_to_be_deleted': inst_to_be_deleted
                                 })

        r.write_files_to({
            '/etc/hadoop/dn.incl':
            utils.generate_fqdn_host_names(survived_inst),
            '/etc/hadoop/dn.excl':
            ""
        })
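The decommission poll calls _is_decommissioned(r, inst_to_be_deleted) every 3 seconds until every node being removed reports a decommissioned status. A hedged sketch of such a check, assuming remote.execute_command returns (exit_code, stdout) and using a hypothetical parse_dfs_report helper that turns the hdfs dfsadmin -report output into a list of per-DataNode dicts:

def _is_decommissioned(r, inst_to_be_deleted):
    # Sketch only: keep polling while any instance being removed is still
    # listed by HDFS with a status other than "Decommissioned".
    exit_code, report = r.execute_command(
        'sudo su -lc "hdfs dfsadmin -report" hadoop')
    datanodes_info = parse_dfs_report(report)  # hypothetical parser helper
    for inst in inst_to_be_deleted:
        for dn in datanodes_info:
            if (dn.get('Name', '').startswith(inst.internal_ip) and
                    dn.get('Decommission Status') != 'Decommissioned'):
                return False
    return True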