Example #1
def _check_storm(cluster):
    dr_count = utils.get_instances_count(cluster, common.DRPC_SERVER)
    ni_count = utils.get_instances_count(cluster, common.NIMBUS)
    su_count = utils.get_instances_count(cluster, common.STORM_UI_SERVER)
    sv_count = utils.get_instances_count(cluster, common.SUPERVISOR)
    if dr_count > 1:
        raise ex.InvalidComponentCountException(common.DRPC_SERVER,
                                                _("0 or 1"), dr_count)
    if ni_count > 1:
        raise ex.InvalidComponentCountException(common.NIMBUS, _("0 or 1"),
                                                ni_count)
    if su_count > 1:
        raise ex.InvalidComponentCountException(common.STORM_UI_SERVER,
                                                _("0 or 1"), su_count)
    if dr_count == 0 and ni_count == 1:
        raise ex.RequiredServiceMissingException(common.DRPC_SERVER,
                                                 required_by=common.NIMBUS)
    if dr_count == 1 and ni_count == 0:
        raise ex.RequiredServiceMissingException(
            common.NIMBUS, required_by=common.DRPC_SERVER)
    if su_count == 1 and (dr_count == 0 or ni_count == 0):
        raise ex.RequiredServiceMissingException(
            common.NIMBUS, required_by=common.STORM_UI_SERVER)
    if dr_count == 1 and sv_count == 0:
        raise ex.RequiredServiceMissingException(
            common.SUPERVISOR, required_by=common.DRPC_SERVER)
    if sv_count > 0 and dr_count == 0:
        raise ex.RequiredServiceMissingException(common.DRPC_SERVER,
                                                 required_by=common.SUPERVISOR)
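All of these checks lean on a small instance-counting helper (utils.get_instances_count here, _get_inst_count or cls.get_inst_count in the later examples). A minimal sketch of such a helper, assuming a Sahara-style cluster whose node groups expose count and node_processes, could look like this:

def get_instances_count(cluster, process_name):
    # Sum the sizes of every node group that runs the given process.
    return sum(ng.count for ng in cluster.node_groups
               if process_name in ng.node_processes)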
Example #2
    def _impala_validation(cls, cluster):
        ics_count = cls.get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
        iss_count = cls.get_inst_count(cluster, 'IMPALA_STATESTORE')
        id_count = cls.get_inst_count(cluster, 'IMPALAD')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        hms_count = cls.get_inst_count(cluster, 'HIVE_METASTORE')

        if ics_count > 1:
            raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                    _('0 or 1'), ics_count)
        if iss_count > 1:
            raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                    _('0 or 1'), iss_count)
        if ics_count == 1:
            datanode_ng = u.get_node_groups(cluster, "HDFS_DATANODE")
            impalad_ng = u.get_node_groups(cluster, "IMPALAD")
            datanodes = set(ng.id for ng in datanode_ng)
            impalads = set(ng.id for ng in impalad_ng)

            if datanodes != impalads:
                raise ex.InvalidClusterTopology(
                    _("IMPALAD must be installed on every HDFS_DATANODE"))

            if iss_count != 1:
                raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                         required_by='IMPALA')
            if id_count < 1:
                raise ex.RequiredServiceMissingException('IMPALAD',
                                                         required_by='IMPALA')
            if dn_count < 1:
                raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                         required_by='IMPALA')
            if hms_count < 1:
                raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                         required_by='IMPALA')
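The topology check above compares the sets of node-group ids that host HDFS_DATANODE and IMPALAD, so IMPALAD has to be co-located with every datanode. A small illustration of the same idea with made-up node-group ids:

datanode_ngs = {'ng-master', 'ng-workers'}   # groups running HDFS_DATANODE
impalad_ngs = {'ng-workers'}                 # groups running IMPALAD
if datanode_ngs != impalad_ngs:
    # the validator above would raise InvalidClusterTopology at this point
    print("IMPALAD must be installed on every HDFS_DATANODE")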
Example #3
    def _hue_validation(cls, cluster):
        hue_count = cls.get_inst_count(cluster, 'HUE_SERVER')
        if hue_count > 1:
            raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                    hue_count)

        shs_count = cls.get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
        hms_count = cls.get_inst_count(cluster, 'HIVE_METASTORE')
        oo_count = cls.get_inst_count(cluster, 'OOZIE_SERVER')
        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')

        if shs_count > 1:
            raise ex.InvalidComponentCountException(
                'SPARK_YARN_HISTORY_SERVER', _('0 or 1'), shs_count)
        if shs_count and not rm_count:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER',
                required_by='SPARK_YARN_HISTORY_SERVER')

        if oo_count < 1 and hue_count:
            raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                     required_by='HUE_SERVER')

        if hms_count < 1 and hue_count:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='HUE_SERVER')
Example #4
    def _yarn_ha_validation(cls, cluster):
        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        stdb_rm_count = cls.get_inst_count(cluster, 'YARN_STANDBYRM')

        require_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
            cluster)

        if stdb_rm_count > 1:
            raise ex.InvalidComponentCountException('YARN_STANDBYRM',
                                                    _('0 or 1'), stdb_rm_count)
        if stdb_rm_count > 0:
            if rm_count < 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='RM HA')
            if zk_count < 1:
                raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                         required_by='RM HA')
            if require_anti_affinity:
                if 'YARN_RESOURCEMANAGER' not in \
                        cls._get_anti_affinity(cluster):
                    raise ex.ResourceManagerHAConfigurationError(
                        _('YARN_RESOURCEMANAGER should be enabled in '
                          'anti_affinity.'))
                if 'YARN_STANDBYRM' not in cls._get_anti_affinity(cluster):
                    raise ex.ResourceManagerHAConfigurationError(
                        _('YARN_STANDBYRM should be'
                          ' enabled in anti_affinity.'))
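This check (and the HDFS HA check later on) consults cls._get_anti_affinity(cluster). A plausible sketch of that lookup, assuming the cluster object exposes anti_affinity as a list of process names that must not share hosts:

    @classmethod
    def _get_anti_affinity(cls, cluster):
        # Process names the cluster requires to be placed on different hosts.
        return cluster.anti_affinity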
Example #5
def validate_cluster_creating(pctx, cluster):
    nn_count = _get_inst_count(cluster, 'namenode')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('namenode', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'secondarynamenode')
    if snn_count not in [0, 1]:
        raise ex.InvalidComponentCountException('secondarynamenode',
                                                _('0 or 1'), snn_count)

    rm_count = _get_inst_count(cluster, 'resourcemanager')
    if rm_count not in [0, 1]:
        raise ex.InvalidComponentCountException('resourcemanager', _('0 or 1'),
                                                rm_count)

    hs_count = _get_inst_count(cluster, 'historyserver')
    if hs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('historyserver', _('0 or 1'),
                                                hs_count)

    nm_count = _get_inst_count(cluster, 'nodemanager')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException('resourcemanager',
                                                     required_by='nodemanager')

    oo_count = _get_inst_count(cluster, 'oozie')
    dn_count = _get_inst_count(cluster, 'datanode')
    if oo_count not in [0, 1]:
        raise ex.InvalidComponentCountException('oozie', _('0 or 1'), oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('datanode',
                                                     required_by='oozie')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException('nodemanager',
                                                     required_by='oozie')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException('historyserver',
                                                     required_by='oozie')

    rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
    if dn_count < rep_factor:
        raise ex.InvalidComponentCountException(
            'datanode', rep_factor, dn_count,
            _('Number of datanodes must be '
              'not less than '
              'dfs.replication.'))

    hive_count = _get_inst_count(cluster, 'hiveserver')
    if hive_count not in [0, 1]:
        raise ex.InvalidComponentCountException('hive', _('0 or 1'),
                                                hive_count)
Example #6
    def _solr_validation(cls, cluster):
        slr_count = cls.get_inst_count(cluster, 'SOLR_SERVER')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')

        if slr_count >= 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='SOLR_SERVER')
            if zk_count < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='SOLR_SERVER')
Example #7
def _check_hbase(cluster):
    hm_count = utils.get_instances_count(cluster, common.HBASE_MASTER)
    hr_count = utils.get_instances_count(cluster, common.HBASE_REGIONSERVER)
    if hm_count > 1:
        raise ex.InvalidComponentCountException(common.HBASE_MASTER,
                                                _("0 or 1"), hm_count)
    if hm_count == 1 and hr_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HBASE_REGIONSERVER, required_by=common.HBASE_MASTER)
    if hr_count > 0 and hm_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HBASE_MASTER, required_by=common.HBASE_REGIONSERVER)
Example #8
    def _basic_validation(cls, cluster):

        mng_count = cls.get_inst_count(cluster, 'CLOUDERA_MANAGER')
        if mng_count != 1:
            raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                    mng_count)

        nn_count = cls.get_inst_count(cluster, 'HDFS_NAMENODE')
        if nn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1,
                                                    nn_count)

        snn_count = cls.get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
        if snn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE',
                                                    1, snn_count)
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        replicas = cls.PU.get_config_value('HDFS', 'dfs_replication', cluster)
        if dn_count < replicas:
            raise ex.InvalidComponentCountException(
                'HDFS_DATANODE', replicas, dn_count,
                _('Number of datanodes must be not'
                  ' less than dfs_replication.'))

        du_reserved = cls.PU.get_config_value('DATANODE',
                                              'dfs_datanode_du_reserved',
                                              cluster)
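        # dfs_datanode_du_reserved is configured in bytes; dividing by
        # 1073741824 (1024 ** 3) converts it to gigabytes so it can be
        # compared against node_group.volumes_size, which is given in GB.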
        du_reserved = du_reserved / 1073741824.
        for node_group in cluster.node_groups:
            volume_size = node_group.volumes_size
            if volume_size and volume_size < du_reserved:
                raise ex.InvalidVolumeSizeException(volume_size, du_reserved)

        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        if rm_count > 1:
            raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                    _('0 or 1'), rm_count)

        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        if hs_count > 1:
            raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
                                                    _('0 or 1'), hs_count)

        if rm_count > 0 and hs_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')
        if rm_count == 0:
            if nm_count > 0:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')
Example #9
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        snn_count = sum([
            ng.count
            for ng in utils.get_node_groups(cluster, 'secondarynamenode')
        ])
        if snn_count > 1:
            raise ex.InvalidComponentCountException('secondarynamenode',
                                                    _('0 or 1'), snn_count)

        jt_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "jobtracker")])

        if jt_count > 1:
            raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
                                                    jt_count)

        oozie_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "oozie")])

        if oozie_count > 1:
            raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
                                                    oozie_count)

        hive_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "hiveserver")])
        if jt_count == 0:

            tt_count = sum([
                ng.count
                for ng in utils.get_node_groups(cluster, "tasktracker")
            ])
            if tt_count > 0:
                raise ex.RequiredServiceMissingException(
                    "jobtracker", required_by="tasktracker")

            if oozie_count > 0:
                raise ex.RequiredServiceMissingException("jobtracker",
                                                         required_by="oozie")

            if hive_count > 0:
                raise ex.RequiredServiceMissingException("jobtracker",
                                                         required_by="hive")

        if hive_count > 1:
            raise ex.InvalidComponentCountException("hive", _('0 or 1'),
                                                    hive_count)
Example #10
def _check_hive(cluster):
    hs_count = utils.get_instances_count(cluster, common.HIVE_SERVER)
    hm_count = utils.get_instances_count(cluster, common.HIVE_METASTORE)
    if hs_count > 1:
        raise ex.InvalidComponentCountException(common.HIVE_SERVER,
                                                _("0 or 1"), hs_count)
    if hm_count > 1:
        raise ex.InvalidComponentCountException(common.HIVE_METASTORE,
                                                _("0 or 1"), hm_count)
    if hs_count == 0 and hm_count == 1:
        raise ex.RequiredServiceMissingException(
            common.HIVE_SERVER, required_by=common.HIVE_METASTORE)
    if hs_count == 1 and hm_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HIVE_METASTORE, required_by=common.HIVE_SERVER)
Example #11
def _check_ranger(cluster):
    ra_count = utils.get_instances_count(cluster, common.RANGER_ADMIN)
    ru_count = utils.get_instances_count(cluster, common.RANGER_USERSYNC)
    if ra_count > 1:
        raise ex.InvalidComponentCountException(common.RANGER_ADMIN,
                                                _("0 or 1"), ra_count)
    if ru_count > 1:
        raise ex.InvalidComponentCountException(common.RANGER_USERSYNC,
                                                _("0 or 1"), ru_count)
    if ra_count == 1 and ru_count == 0:
        raise ex.RequiredServiceMissingException(
            common.RANGER_USERSYNC, required_by=common.RANGER_ADMIN)
    if ra_count == 0 and ru_count == 1:
        raise ex.RequiredServiceMissingException(
            common.RANGER_ADMIN, required_by=common.RANGER_USERSYNC)
Example #12
    def _sentry_validation(cls, cluster):

        snt_count = cls.get_inst_count(cluster, 'SENTRY_SERVER')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        if snt_count > 1:
            raise ex.InvalidComponentCountException('SENTRY_SERVER',
                                                    _('0 or 1'), snt_count)
        if snt_count == 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='SENTRY_SERVER')
            if zk_count < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='SENTRY_SERVER')
Example #13
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    dn_count)

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Example #14
    def _hdfs_ha_validation(cls, cluster):
        jn_count = cls.get_inst_count(cluster, 'HDFS_JOURNALNODE')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        require_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
            cluster)

        if jn_count > 0:
            if jn_count < 3:
                raise ex.InvalidComponentCountException(
                    'HDFS_JOURNALNODE', _('not less than 3'), jn_count)
            if not jn_count % 2:
                raise ex.InvalidComponentCountException(
                    'HDFS_JOURNALNODE', _('be odd'), jn_count)
            if zk_count < 1:
                raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                         required_by='HDFS HA')
            if require_anti_affinity:
                if 'HDFS_SECONDARYNAMENODE' not in \
                        cls._get_anti_affinity(cluster):
                    raise ex.NameNodeHAConfigurationError(
                        _('HDFS_SECONDARYNAMENODE should be enabled '
                          'in anti_affinity.'))
                if 'HDFS_NAMENODE' not in cls._get_anti_affinity(cluster):
                    raise ex.NameNodeHAConfigurationError(
                        _('HDFS_NAMENODE should be enabled in anti_affinity.'))
Example #15
    def _flume_validation(cls, cluster):
        a_count = cls.get_inst_count(cluster, 'FLUME_AGENT')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')

        if a_count >= 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='FLUME_AGENT')
Example #16
    def validate(cluster_context, service, required_by):
        if not cluster_context.is_present(service):
            service_name = service.ui_name
            if service.version:
                service_name += " %s" % service.version

            raise e.RequiredServiceMissingException(service_name,
                                                    required_by.ui_name)
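This variant delegates the presence check to a cluster context. A minimal sketch of that check, under the assumption (not confirmed by the source) that a service lists the node processes it provides:

    def is_present(self, service):
        # A service counts as present when at least one node group in the
        # cluster carries one of its node processes.
        return any(process in ng.node_processes
                   for ng in self.cluster.node_groups
                   for process in service.node_processes)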
Example #17
    def _basic_validation(cls, cluster):

        mng_count = cls.get_inst_count(cluster, 'CLOUDERA_MANAGER')
        if mng_count != 1:
            raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                    mng_count)

        nn_count = cls.get_inst_count(cluster, 'HDFS_NAMENODE')
        if nn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1,
                                                    nn_count)

        snn_count = cls.get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
        if snn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE',
                                                    1, snn_count)
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        replicas = cls.PU.get_config_value('HDFS', 'dfs_replication', cluster)
        if dn_count < replicas:
            raise ex.InvalidComponentCountException(
                'HDFS_DATANODE', replicas, dn_count,
                _('Number of datanodes must be not'
                  ' less than dfs_replication.'))

        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        if rm_count > 1:
            raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                    _('0 or 1'), rm_count)

        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        if hs_count > 1:
            raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
                                                    _('0 or 1'), hs_count)

        if rm_count > 0 and hs_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')
        if rm_count == 0:
            if nm_count > 0:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')
Example #18
    def _sqoop_validation(cls, cluster):

        s2s_count = cls.get_inst_count(cluster, 'SQOOP_SERVER')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')

        if s2s_count > 1:
            raise ex.InvalidComponentCountException('SQOOP_SERVER',
                                                    _('0 or 1'), s2s_count)
        if s2s_count == 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='SQOOP_SERVER')
            if nm_count < 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
            if hs_count != 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_JOBHISTORY', required_by='SQOOP_SERVER')
Example #19
    def _hive_validation(cls, cluster):
        hms_count = cls.get_inst_count(cluster, 'HIVE_METASTORE')
        hvs_count = cls.get_inst_count(cluster, 'HIVE_SERVER2')
        whc_count = cls.get_inst_count(cluster, 'HIVE_WEBHCAT')
        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')

        if hms_count and rm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='HIVE_METASTORE')

        if hms_count and not hvs_count:
            raise ex.RequiredServiceMissingException(
                'HIVE_SERVER2', required_by='HIVE_METASTORE')

        if hvs_count and not hms_count:
            raise ex.RequiredServiceMissingException(
                'HIVE_METASTORE', required_by='HIVE_SERVER2')

        if whc_count and not hms_count:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='WEBHCAT')
Example #20
    def _hbase_indexer_validation(cls, cluster):

        lhbi_count = cls.get_inst_count(cluster, 'HBASE_INDEXER')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        slr_count = cls.get_inst_count(cluster, 'SOLR_SERVER')
        hbm_count = cls.get_inst_count(cluster, 'HBASE_MASTER')

        if lhbi_count >= 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='HBASE_INDEXER')
            if zk_count < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='HBASE_INDEXER')
            if slr_count < 1:
                raise ex.RequiredServiceMissingException(
                    'SOLR_SERVER', required_by='HBASE_INDEXER')
            if hbm_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HBASE_MASTER', required_by='HBASE_INDEXER')
Example #21
def create_hbase_common_lib(r):
    r.execute_command('sudo su - -c "hadoop dfs -mkdir -p %s" hdfs' %
                      (HBASE_COMMON_LIB_PATH))
    ret_code, stdout = r.execute_command('hbase classpath')
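    # A zero return code means the hbase CLI is available; every jar on its
    # classpath is then copied into HDFS so jobs can pick up the HBase client
    # libraries. A non-zero code is treated as HBase being missing.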
    if ret_code == 0:
        paths = stdout.split(':')
        for p in paths:
            if p.endswith(".jar"):
                r.execute_command(
                    'sudo su - -c "hadoop fs -put -p %s %s" hdfs' %
                    (p, HBASE_COMMON_LIB_PATH))
    else:
        raise ex.RequiredServiceMissingException('hbase')
Example #22
    def _oozie_validation(cls, cluster):

        oo_count = cls._get_inst_count(cluster, 'OOZIE_SERVER')
        dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
        nm_count = cls._get_inst_count(cluster, 'YARN_NODEMANAGER')
        hs_count = cls._get_inst_count(cluster, 'YARN_JOBHISTORY')

        if oo_count > 1:
            raise ex.InvalidComponentCountException('OOZIE_SERVER',
                                                    _('0 or 1'), oo_count)

        if oo_count == 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='OOZIE_SERVER')

            if nm_count < 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

            if hs_count != 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_JOBHISTORY', required_by='OOZIE_SERVER')
Example #23
    def validate(self, cluster):
        # validate Storm Master Node and Storm Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "nimbus")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Storm nimbus")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "supervisor")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Storm supervisor",
                                                    _("1 or more"), sl_count)
Example #24
    def _hbase_validation(cls, cluster):
        hbm_count = cls._get_inst_count(cluster, 'HBASE_MASTER')
        hbr_count = cls._get_inst_count(cluster, 'HBASE_REGIONSERVER')
        zk_count = cls._get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        if hbm_count >= 1:
            if zk_count < 1:
                raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                         required_by='HBASE')
            if hbr_count < 1:
                raise ex.InvalidComponentCountException(
                    'HBASE_REGIONSERVER', _('at least 1'), hbr_count)
        elif hbr_count >= 1:
            raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                    _('at least 1'), hbm_count)
Example #25
    def validate(self, cluster):
        if cluster.hadoop_version == "1.0.0":
            raise exceptions.DeprecatedException(
                _("Support for Spark version 1.0.0 is now deprecated and will"
                  " be removed in the 2016.1 release."))

        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    dn_count)

        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode',
                _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s') % {
                      'dn': 'datanode',
                      'replication': 'dfs.replication'
                  })

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Example #26
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    dn_count)

        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode',
                _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s') % {
                      'dn': 'datanode',
                      'replication': 'dfs.replication'
                  })

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count < 1:
            raise ex.RequiredServiceMissingException("Spark master")

        if sm_count >= 2:
            raise ex.InvalidComponentCountException("Spark master", "1",
                                                    sm_count)

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Example #27
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')
    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            'HDFS_DATANODE', replicas, dn_count,
            _('Number of datanodes must be not less than dfs_replication.'))

    jn_count = _get_inst_count(cluster, 'HDFS_JOURNALNODE')
    require_anti_affinity = PU.c_helper.get_required_anti_affinity(cluster)
    if jn_count > 0:
        if jn_count < 3:
            raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
                                                    _('not less than 3'),
                                                    jn_count)
        if not jn_count % 2:
            raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
                                                    _('be odd'), jn_count)
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HDFS HA')
        if require_anti_affinity:
            if 'HDFS_SECONDARYNAMENODE' not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _('HDFS_SECONDARYNAMENODE should be enabled '
                      'in anti_affinity.'))
            if 'HDFS_NAMENODE' not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _('HDFS_NAMENODE should be enabled in anti_affinity.'))

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    stdb_rm_count = _get_inst_count(cluster, 'YARN_STANDBYRM')
    if stdb_rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_STANDBYRM', _('0 or 1'),
                                                stdb_rm_count)
    if stdb_rm_count > 0:
        if rm_count < 1:
            raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                     required_by='RM HA')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='RM HA')
        if require_anti_affinity:
            if 'YARN_RESOURCEMANAGER' not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _('YARN_RESOURCEMANAGER should be enabled in '
                      'anti_affinity.'))
            if 'YARN_STANDBYRM' not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _('YARN_STANDBYRM should be enabled in anti_affinity.'))

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count > 1:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    if oo_count > 1:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count > 1:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count > 1:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='FLUME_AGENT')

    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count > 1:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='SOLR_SERVER')

    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count > 1:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        datanodes = set(u.get_instances(cluster, "HDFS_DATANODE"))
        impalads = set(u.get_instances(cluster, "IMPALAD"))
        if len(datanodes ^ impalads) > 0:
            raise ex.InvalidClusterTopology(
                _("IMPALAD must be installed on every HDFS_DATANODE"))

        if iss_count != 1:
            raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                     required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException('IMPALAD',
                                                     required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='IMPALA')

    kms_count = _get_inst_count(cluster, 'KMS')
    if kms_count > 1:
        raise ex.InvalidComponentCountException('KMS', _('0 or 1'), kms_count)
Example #28
def assert_present(service, cluster_context):
    if not cluster_context.is_present(service):
        raise e.RequiredServiceMissingException(service.ui_name)
Example #29
    def validate_node_groups(self, cluster):
        for service in self.services:
            if service.deployed:
                service.validate(self, cluster)
            elif service.is_mandatory():
                raise ex.RequiredServiceMissingException(service.name)
Example #30
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            'HDFS_DATANODE', replicas, dn_count,
            _('Number of datanodes must be not less than dfs_replication.'))

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count > 1:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    if oo_count > 1:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count > 1:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count > 1:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='FLUME_AGENT')

    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count > 1:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='SOLR_SERVER')

    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count > 1:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        if iss_count != 1:
            raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                     required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException('IMPALAD',
                                                     required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='IMPALA')

    kms_count = _get_inst_count(cluster, 'KMS')
    if kms_count > 1:
        raise ex.InvalidComponentCountException('KMS', _('0 or 1'), kms_count)