def _check_decommission(cluster, instances, check_func, option):
    poll_utils.plugin_option_poll(cluster, is_decommissioned, option,
                                  _("Wait for decommissioning"), 5, {
                                      'cluster': cluster,
                                      'check_func': check_func,
                                      'instances': instances
                                  })
Example #2
    def wait_for_host_registrations(self, num_hosts, ambari_info):
        cluster = ambari_info.get_cluster()
        poll_utils.plugin_option_poll(
            cluster, self._check_host_registrations,
            cfgprov.HOST_REGISTRATIONS_TIMEOUT,
            _("Wait for host registrations"), 5, {
                'num_hosts': num_hosts, 'ambari_info': ambari_info})
Example #3
    def wait_for_host_registrations(self, num_hosts, ambari_info):
        cluster = ambari_info.get_cluster()
        poll_utils.plugin_option_poll(cluster, self._check_host_registrations,
                                      cfgprov.HOST_REGISTRATIONS_TIMEOUT,
                                      _("Wait for host registrations"), 5, {
                                          'num_hosts': num_hosts,
                                          'ambari_info': ambari_info
                                      })
Example #4
    def _start_cloudera_manager(self, cluster, timeout_config):
        manager = self.get_manager(cluster)
        with manager.remote() as r:
            cmd.start_cloudera_db(r)
            cmd.start_manager(r)
        poll_utils.plugin_option_poll(
            cluster, self._check_cloudera_manager_started, timeout_config,
            _("Await starting Cloudera Manager"), 2, {'manager': manager})
Example #5
    def _await_agents(self, cluster, instances, timeout_config):
        api = self.get_api_client(instances[0].cluster)
        poll_utils.plugin_option_poll(cluster,
                                      self._agents_connected, timeout_config,
                                      _("Await Cloudera agents"), 5, {
                                          'instances': instances,
                                          'api': api
                                      })
Example #7
def _check_decommission(cluster, instances, check_func, option):
    poll_utils.plugin_option_poll(
        cluster,
        is_decommissioned,
        option,
        _("Wait for decommissioning"),
        5,
        {"cluster": cluster, "check_func": check_func, "instances": instances},
    )
Example #8
    def _await_agents(self, cluster, instances, timeout_config):
        api = self.get_api_client(instances[0].cluster)
        poll_utils.plugin_option_poll(
            cluster,
            self._agents_connected,
            timeout_config,
            _("Await Cloudera agents"),
            5,
            {"instances": instances, "api": api},
        )
Example #9
def await_datanodes(cluster):
    datanodes_count = len(utils.get_instances(cluster, "datanode"))
    if datanodes_count < 1:
        return

    log_msg = _("Waiting on %d DataNodes to start up") % datanodes_count
    with utils.get_instance(cluster, "namenode").remote() as r:
        poll_utils.plugin_option_poll(
            cluster, _check_datanodes_count,
            c_helper.DATANODES_STARTUP_TIMEOUT,
            log_msg, 1, {"remote": r, "count": datanodes_count})
Example #10
def await_datanodes(cluster):
    datanodes_count = len(vu.get_datanodes(cluster))
    if datanodes_count < 1:
        return

    l_message = _("Waiting on %s datanodes to start up") % datanodes_count
    with vu.get_namenode(cluster).remote() as r:
        poll_utils.plugin_option_poll(
            cluster, _check_datanodes_count,
            c_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, {
                'remote': r, 'count': datanodes_count})
Example #11
    def test_plugin_poll_first_scenario(self, cluster_exists, logger):
        cluster_exists.return_value = True
        fake_get_status = mock.Mock()
        fake_get_status.side_effect = [False, False, True]
        fake_kwargs = {'kwargs': {'cat': 'tom', 'bond': 'james bond'}}
        poll_utils.plugin_option_poll(FakeCluster({}), fake_get_status,
                                      FakeOption(5, 'target', 'name'),
                                      'fake_operation', 5, **fake_kwargs)
        expected_call = mock.call('Operation with name fake_operation was '
                                  'executed successfully in timeout 5')
        self.assertEqual([expected_call], logger.call_args_list)
Example #12
    def test_plugin_poll_first_scenario(self, cluster_exists, logger):
        cluster_exists.return_value = True
        fake_get_status = mock.Mock()
        fake_get_status.side_effect = [False, False, True]
        fake_kwargs = {'kwargs': {'cat': 'tom', 'bond': 'james bond'}}
        poll_utils.plugin_option_poll(
            FakeCluster({}), fake_get_status, FakeOption(5, 'target', 'name'),
            'fake_operation', 5, **fake_kwargs)
        expected_call = mock.call('Operation with name fake_operation was '
                                  'executed successfully in timeout 5')
        self.assertEqual([expected_call], logger.call_args_list)
Example #14
    def _await_datanodes(self, cluster):
        datanodes_count = len(vu.get_datanodes(cluster))
        if datanodes_count < 1:
            return

        l_message = _("Waiting on %s datanodes to start up") % datanodes_count
        LOG.info(l_message)
        with remote.get_remote(vu.get_namenode(cluster)) as r:
            poll_utils.plugin_option_poll(
                cluster, run.check_datanodes_count,
                c_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, {
                    'remote': r,
                    'count': datanodes_count})
Example #15
def await_datanodes(cluster):
    datanodes_count = len(utils.get_instances(cluster, "datanode"))
    if datanodes_count < 1:
        return

    log_msg = _("Waiting on %d DataNodes to start up") % datanodes_count
    with utils.get_instance(cluster, "namenode").remote() as r:
        poll_utils.plugin_option_poll(cluster, _check_datanodes_count,
                                      c_helper.DATANODES_STARTUP_TIMEOUT,
                                      log_msg, 1, {
                                          "remote": r,
                                          "count": datanodes_count
                                      })
Example #17
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        # Put the instances being removed into the DataNode exclude file and
        # tell the NameNode to re-read it.
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        # Poll until the excluded DataNodes are reported as decommissioned or
        # DECOMMISSIONING_TIMEOUT expires.
        poll_utils.plugin_option_poll(
            nn.cluster, _is_decommissioned, c_helper.DECOMMISSIONING_TIMEOUT,
            _("Decommission %s") % "DataNodes", 3, {
                'r': r, 'inst_to_be_deleted': inst_to_be_deleted})

        # Rewrite the include file with the surviving instances and clear the
        # exclude file.
        r.write_files_to({
            '/etc/hadoop/dn.incl':
                utils.generate_fqdn_host_names(survived_inst),
            '/etc/hadoop/dn.excl': ""})
Example #18
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        poll_utils.plugin_option_poll(
            nn.cluster, is_decommissioned,
            config_helper.DECOMMISSIONING_TIMEOUT,
            _("Decommission %s") % "DataNodes", 3,
            {'r': r, 'inst_to_be_deleted': inst_to_be_deleted})

        r.write_files_to({'/etc/hadoop/dn.incl':
                          utils.generate_fqdn_host_names(survived_inst),
                          '/etc/hadoop/dn.excl': ""})
Example #19
def _check_decommission(cluster, instances, check_func, option):
    poll_utils.plugin_option_poll(
        cluster, is_decommissioned, option, _("Wait for decommissioning"),
        5, {'cluster': cluster, 'check_func': check_func,
            'instances': instances})
Example #20
def plugin_option_poll(cluster, get_status, option, operation_name, sleep_time,
                       kwargs):
    poll_utils.plugin_option_poll(cluster, get_status, option, operation_name,
                                  sleep_time, kwargs)
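
All of the examples above share one calling convention: plugin_option_poll(cluster, get_status, option, operation_name, sleep_time, kwargs) re-runs get_status(**kwargs) every sleep_time seconds until the check returns True or the timeout carried by the plugin config option expires; the mocked check in Example #11 returns False, False, True, and the success path logs the operation name together with the timeout value. The standalone sketch below distills that pattern. The names await_service and _check_service_started, the pgrep-based check, and the import paths are assumptions made for illustration, not code taken from any of the plugins shown here.

# Minimal sketch of the common pattern (assumptions only: await_service,
# _check_service_started and the import paths are illustrative).
from sahara.i18n import _
from sahara.utils import poll_utils


def _check_service_started(remote, process):
    # Hypothetical status check: plugin_option_poll re-runs it with the
    # kwargs dict passed below until it returns True.
    code, _output = remote.execute_command(
        'pgrep -f %s' % process, raise_when_error=False)
    return code == 0


def await_service(cluster, instance, timeout_option, process):
    # timeout_option is a plugin config option carrying the timeout, in the
    # spirit of c_helper.DATANODES_STARTUP_TIMEOUT in the examples above.
    with instance.remote() as r:
        poll_utils.plugin_option_poll(
            cluster,                      # cluster whose config is consulted
            _check_service_started,       # status check, re-run each iteration
            timeout_option,               # config option holding the timeout
            _("Await service start-up"),  # operation name used in log messages
            5,                            # seconds to sleep between checks
            {'remote': r, 'process': process})  # kwargs passed to the check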