Example #1
    def validate(self, cluster):
        if cluster.hadoop_version == "1.0.0":
            raise exceptions.DeprecatedException(
                _("Support for Spark version 1.0.0 is now deprecated and will" " be removed in the 2016.1 release.")
            )

        nn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"), nn_count)

        rep_factor = utils.get_config_value_or_default("HDFS", "dfs.replication", cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                "datanode",
                _("%s or more") % rep_factor,
                dn_count,
                _("Number of %(dn)s instances should not be less " "than %(replication)s")
                % {"dn": "datanode", "replication": "dfs.replication"},
            )

        # validate Spark Master Node and Spark Slaves
        sm_count = sum([ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum([ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave", _("1 or more"), sl_count)
Example #2
    def validate(self, cluster):
        nn_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    nn_count)

        # validate Spark Master Node and Spark Slaves
        sm_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"),
                                                    sl_count)
Example #3
    def _impala_validation(cls, cluster):
        ics_count = cls.get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
        iss_count = cls.get_inst_count(cluster, 'IMPALA_STATESTORE')
        id_count = cls.get_inst_count(cluster, 'IMPALAD')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        hms_count = cls.get_inst_count(cluster, 'HIVE_METASTORE')

        if ics_count > 1:
            raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                    _('0 or 1'), ics_count)
        if iss_count > 1:
            raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                    _('0 or 1'), iss_count)
        if ics_count == 1:
            datanode_ng = u.get_node_groups(cluster, "HDFS_DATANODE")
            impalad_ng = u.get_node_groups(cluster, "IMPALAD")
            datanodes = set(ng.id for ng in datanode_ng)
            impalads = set(ng.id for ng in impalad_ng)

            if datanodes != impalads:
                raise ex.InvalidClusterTopology(
                    _("IMPALAD must be installed on every HDFS_DATANODE"))

            if iss_count != 1:
                raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                         required_by='IMPALA')
            if id_count < 1:
                raise ex.RequiredServiceMissingException('IMPALAD',
                                                         required_by='IMPALA')
            if dn_count < 1:
                raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                         required_by='IMPALA')
            if hms_count < 1:
                raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                         required_by='IMPALA')
Example #4
    def _impala_validation(cls, cluster):
        ics_count = cls._get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
        iss_count = cls._get_inst_count(cluster, 'IMPALA_STATESTORE')
        id_count = cls._get_inst_count(cluster, 'IMPALAD')
        dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
        hms_count = cls._get_inst_count(cluster, 'HIVE_METASTORE')

        if ics_count > 1:
            raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                    _('0 or 1'), ics_count)
        if iss_count > 1:
            raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                    _('0 or 1'), iss_count)
        if ics_count == 1:
            datanode_ng = u.get_node_groups(cluster, "HDFS_DATANODE")
            impalad_ng = u.get_node_groups(cluster, "IMPALAD")
            datanodes = set(ng.id for ng in datanode_ng)
            impalads = set(ng.id for ng in impalad_ng)

            if datanodes != impalads:
                raise ex.InvalidClusterTopology(
                    _("IMPALAD must be installed on every HDFS_DATANODE"))

            if iss_count != 1:
                raise ex.RequiredServiceMissingException(
                    'IMPALA_STATESTORE', required_by='IMPALA')
            if id_count < 1:
                raise ex.RequiredServiceMissingException(
                    'IMPALAD', required_by='IMPALA')
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='IMPALA')
            if hms_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HIVE_METASTORE', required_by='IMPALA')
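
Both Impala validators above enforce colocation by comparing the set of node-group ids that run HDFS_DATANODE with the set that run IMPALAD. Note that exact set equality is slightly stronger than the error message suggests: it also rejects an IMPALAD placed on a group with no datanode. The same check in isolation, with plain illustrative ids instead of Sahara models:

    # Node-group ids hosting each process (illustrative values).
    datanodes = {"ng-1", "ng-2", "ng-3"}
    impalads = {"ng-1", "ng-2"}

    if datanodes != impalads:
        # ng-3 runs HDFS_DATANODE without IMPALAD, so validation fails.
        raise ValueError("IMPALAD must be installed on every HDFS_DATANODE")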
Example #5
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    nn_count)

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Example #6
    def validate(self, cluster):
        nn_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    nn_count)

        rep_factor = c_helper.get_config_value('HDFS', "dfs.replication",
                                               cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode', _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s')
                % {'dn': 'datanode', 'replication': 'dfs.replication'})

        # validate Spark Master Node and Spark Slaves
        sm_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"),
                                                    sl_count)
Example #7
    def validate(self, cluster):
        # validate Storm Master Node and Storm Slaves
        sm_count = sum([ng.count for ng in utils.get_node_groups(cluster, "nimbus")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Storm nimbus")

        sl_count = sum([ng.count for ng in utils.get_node_groups(cluster, "supervisor")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Storm supervisor", _("1 or more"), sl_count)
Example #8
    def test_get_node_groups(self):
        res = pu.get_node_groups(self.cluster)
        self.assertEqual([
            FakeNodeGroup(["node_process1"]),
            FakeNodeGroup(["node_process2"]),
            FakeNodeGroup(["node_process3"]),
        ], res)

        res = pu.get_node_groups(self.cluster, "node_process1")
        self.assertEqual([FakeNodeGroup(["node_process1"])], res)

        res = pu.get_node_groups(self.cluster, "node_process")
        self.assertEqual([], res)
Example #9
    def validate(self, cluster):
        # validate Storm Master Node and Storm Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "nimbus")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Storm nimbus")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "supervisor")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Storm supervisor",
                                                    _("1 or more"), sl_count)
Example #10
    def test_get_node_groups(self):
        res = pu.get_node_groups(self.cluster)
        self.assertEqual([
            FakeNodeGroup(["node_process1"]),
            FakeNodeGroup(["node_process2"]),
            FakeNodeGroup(["node_process3"]),
        ], res)

        res = pu.get_node_groups(self.cluster, "node_process1")
        self.assertEqual([
            FakeNodeGroup(["node_process1"])
        ], res)

        res = pu.get_node_groups(self.cluster, "node_process")
        self.assertEqual([], res)
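
These tests compare FakeNodeGroup instances directly with assertEqual, which only passes if the fixture class defines value equality. A minimal fake consistent with the assertions (illustrative; the real test fixture may differ):

    class FakeNodeGroup:
        def __init__(self, node_processes):
            self.node_processes = node_processes

        def __eq__(self, other):
            # Value equality so assertEqual can compare expected vs. actual.
            return (isinstance(other, FakeNodeGroup)
                    and self.node_processes == other.node_processes)

        def __repr__(self):
            return "FakeNodeGroup(%r)" % (self.node_processes,)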
Example #11
    def validate(self, cluster):
        if cluster.hadoop_version == "1.0.0":
            raise exceptions.DeprecatedException(
                _("Support for Spark version 1.0.0 is now deprecated and will"
                  " be removed in the 2016.1 release."))

        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    nn_count)

        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode',
                _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s') % {
                      'dn': 'datanode',
                      'replication': 'dfs.replication'
                  })

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Example #12
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        jt_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "jobtracker")])

        if jt_count not in [0, 1]:
            raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
                                                    jt_count)

        oozie_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "oozie")])

        if oozie_count not in [0, 1]:
            raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
                                                    oozie_count)

        hive_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "hiveserver")])
        if jt_count == 0:

            tt_count = sum([
                ng.count
                for ng in utils.get_node_groups(cluster, "tasktracker")
            ])
            if tt_count > 0:
                raise ex.RequiredServiceMissingException(
                    "jobtracker", required_by="tasktracker")

            if oozie_count > 0:
                raise ex.RequiredServiceMissingException("jobtracker",
                                                         required_by="oozie")

            if hive_count > 0:
                raise ex.RequiredServiceMissingException("jobtracker",
                                                         required_by="hive")

        if hive_count not in [0, 1]:
            raise ex.InvalidComponentCountException("hive", _('0 or 1'),
                                                    hive_count)
Example #13
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    nn_count)

        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode',
                _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s') % {
                      'dn': 'datanode',
                      'replication': 'dfs.replication'
                  })

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count < 1:
            raise ex.RequiredServiceMissingException("Spark master")

        if sm_count >= 2:
            raise ex.InvalidComponentCountException("Spark master", "1",
                                                    sm_count)

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Example #14
    def validate(self, cluster):
        nn_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        jt_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "jobtracker")])

        if jt_count > 1:
            raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
                                                    jt_count)

        oozie_count = sum([ng.count for ng
                           in utils.get_node_groups(cluster, "oozie")])

        if oozie_count > 1:
            raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
                                                    oozie_count)

        hive_count = sum([ng.count for ng
                          in utils.get_node_groups(cluster, "hiveserver")])
        if jt_count == 0:

            tt_count = sum([ng.count for ng
                            in utils.get_node_groups(cluster, "tasktracker")])
            if tt_count > 0:
                raise ex.RequiredServiceMissingException(
                    "jobtracker", required_by="tasktracker")

            if oozie_count > 0:
                raise ex.RequiredServiceMissingException(
                    "jobtracker", required_by="oozie")

            if hive_count > 0:
                raise ex.RequiredServiceMissingException(
                    "jobtracker", required_by="hive")

        if hive_count > 1:
            raise ex.InvalidComponentCountException("hive", _('0 or 1'),
                                                    hive_count)
Example #15
 def get_node_groups(self, node_process=None):
     name = _get_node_process_name(node_process)
     return u.get_node_groups(self.cluster, name)
Example #16
 def get_node_groups(self, node_process=None):
     name = _get_node_process_name(node_process)
     return u.get_node_groups(self.cluster, name)
Example #17
 def test_get_node_groups(self):
     self.assertEqual(self.c1.node_groups, u.get_node_groups(self.c1))
     self.assertEqual([], u.get_node_groups(self.c1, "wrong-process"))
     self.assertEqual([self.ng2, self.ng3],
                      u.get_node_groups(self.c1, 'dn'))
Example #18
 def validate_scaling(self, cluster, existing, additional):
     vl.validate_additional_ng_scaling(cluster, additional)
     vl.validate_existing_ng_scaling(self.pctx, cluster, existing)
     zk_ng = utils.get_node_groups(cluster, "zookeeper")
     if zk_ng:
         vl.validate_zookeeper_node_count(zk_ng, existing, additional)
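
The validate_zookeeper_node_count helper itself is not shown on this page. Purely as a hypothetical sketch of what such a check might enforce (not Sahara's actual implementation), ZooKeeper ensembles conventionally follow the same odd-count rule that Example #21 below applies to HDFS_JOURNALNODE:

    def check_zookeeper_count(zk_count):
        # Hypothetical rule: a ZooKeeper ensemble should be an odd
        # number of instances, and at least 3 of them.
        if zk_count < 3:
            raise ValueError("ZooKeeper count must be at least 3, got %d"
                             % zk_count)
        if zk_count % 2 == 0:
            raise ValueError("ZooKeeper count must be odd, got %d" % zk_count)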
Example #19
 def test_get_node_groups(self):
     self.assertEqual(u.get_node_groups(self.c1), self.c1.node_groups)
     self.assertEqual(u.get_node_groups(self.c1, "wrong-process"), [])
     self.assertEqual(u.get_node_groups(self.c1, 'dn'),
                      [self.ng2, self.ng3])
Example #20
 def get_node_groups(self, node_process=None):
     if node_process is not None:
         node_process = su.get_node_process_name(node_process)
     return u.get_node_groups(self.cluster, node_process)
Example #21
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, "CLOUDERA_MANAGER")
    if mng_count != 1:
        raise ex.InvalidComponentCountException("CLOUDERA_MANAGER", 1, mng_count)

    zk_count = _get_inst_count(cluster, "ZOOKEEPER_SERVER")
    nn_count = _get_inst_count(cluster, "HDFS_NAMENODE")
    if nn_count != 1:
        raise ex.InvalidComponentCountException("HDFS_NAMENODE", 1, nn_count)

    snn_count = _get_inst_count(cluster, "HDFS_SECONDARYNAMENODE")
    if snn_count != 1:
        raise ex.InvalidComponentCountException("HDFS_SECONDARYNAMENODE", 1, snn_count)

    dn_count = _get_inst_count(cluster, "HDFS_DATANODE")
    replicas = PU.get_config_value("HDFS", "dfs_replication", cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            "HDFS_DATANODE", replicas, dn_count, _("Number of datanodes must be not less than dfs_replication.")
        )

    jn_count = _get_inst_count(cluster, "HDFS_JOURNALNODE")
    require_anti_affinity = PU.c_helper.get_required_anti_affinity(cluster)
    if jn_count > 0:
        if jn_count < 3:
            raise ex.InvalidComponentCountException("HDFS_JOURNALNODE", _("not less than 3"), jn_count)
        if not jn_count % 2:
            raise ex.InvalidComponentCountException("HDFS_JOURNALNODE", _("be odd"), jn_count)
        if zk_count < 1:
            raise ex.RequiredServiceMissingException("ZOOKEEPER", required_by="HDFS HA")
        if require_anti_affinity:
            if "HDFS_SECONDARYNAMENODE" not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _("HDFS_SECONDARYNAMENODE should be enabled " "in anti_affinity.")
                )
            if "HDFS_NAMENODE" not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(_("HDFS_NAMENODE should be enabled in anti_affinity."))

    rm_count = _get_inst_count(cluster, "YARN_RESOURCEMANAGER")
    if rm_count > 1:
        raise ex.InvalidComponentCountException("YARN_RESOURCEMANAGER", _("0 or 1"), rm_count)

    stdb_rm_count = _get_inst_count(cluster, "YARN_STANDBYRM")
    if stdb_rm_count > 1:
        raise ex.InvalidComponentCountException("YARN_STANDBYRM", _("0 or 1"), stdb_rm_count)
    if stdb_rm_count > 0:
        if rm_count < 1:
            raise ex.RequiredServiceMissingException("YARN_RESOURCEMANAGER", required_by="RM HA")
        if zk_count < 1:
            raise ex.RequiredServiceMissingException("ZOOKEEPER", required_by="RM HA")
        if require_anti_affinity:
            if "YARN_RESOURCEMANAGER" not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _("YARN_RESOURCEMANAGER should be enabled in " "anti_affinity.")
                )
            if "YARN_STANDBYRM" not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(_("YARN_STANDBYRM should be enabled in anti_affinity."))

    hs_count = _get_inst_count(cluster, "YARN_JOBHISTORY")
    if hs_count > 1:
        raise ex.InvalidComponentCountException("YARN_JOBHISTORY", _("0 or 1"), hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException("YARN_JOBHISTORY", required_by="YARN_RESOURCEMANAGER")

    nm_count = _get_inst_count(cluster, "YARN_NODEMANAGER")
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException("YARN_RESOURCEMANAGER", required_by="YARN_NODEMANAGER")

    oo_count = _get_inst_count(cluster, "OOZIE_SERVER")
    if oo_count > 1:
        raise ex.InvalidComponentCountException("OOZIE_SERVER", _("0 or 1"), oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="OOZIE_SERVER")

        if nm_count < 1:
            raise ex.RequiredServiceMissingException("YARN_NODEMANAGER", required_by="OOZIE_SERVER")

        if hs_count != 1:
            raise ex.RequiredServiceMissingException("YARN_JOBHISTORY", required_by="OOZIE_SERVER")

    hms_count = _get_inst_count(cluster, "HIVE_METASTORE")
    hvs_count = _get_inst_count(cluster, "HIVE_SERVER2")
    whc_count = _get_inst_count(cluster, "HIVE_WEBHCAT")

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException("YARN_RESOURCEMANAGER", required_by="HIVE_METASTORE")

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException("HIVE_SERVER2", required_by="HIVE_METASTORE")

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException("HIVE_METASTORE", required_by="HIVE_SERVER2")

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException("HIVE_METASTORE", required_by="HIVE_WEBHCAT")

    hue_count = _get_inst_count(cluster, "HUE_SERVER")
    if hue_count > 1:
        raise ex.InvalidComponentCountException("HUE_SERVER", _("0 or 1"), hue_count)

    shs_count = _get_inst_count(cluster, "SPARK_YARN_HISTORY_SERVER")
    if shs_count > 1:
        raise ex.InvalidComponentCountException("SPARK_YARN_HISTORY_SERVER", _("0 or 1"), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException("YARN_RESOURCEMANAGER", required_by="SPARK_YARN_HISTORY_SERVER")

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException("OOZIE_SERVER", required_by="HUE_SERVER")

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException("HIVE_METASTORE", required_by="HUE_SERVER")

    hbm_count = _get_inst_count(cluster, "HBASE_MASTER")
    hbr_count = _get_inst_count(cluster, "HBASE_REGIONSERVER")

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException("ZOOKEEPER", required_by="HBASE")
        if hbr_count < 1:
            raise ex.InvalidComponentCountException("HBASE_REGIONSERVER", _("at least 1"), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException("HBASE_MASTER", _("at least 1"), hbm_count)

    a_count = _get_inst_count(cluster, "FLUME_AGENT")
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="FLUME_AGENT")

    snt_count = _get_inst_count(cluster, "SENTRY_SERVER")
    if snt_count > 1:
        raise ex.InvalidComponentCountException("SENTRY_SERVER", _("0 or 1"), snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="SENTRY_SERVER")
        if zk_count < 1:
            raise ex.RequiredServiceMissingException("ZOOKEEPER", required_by="SENTRY_SERVER")

    slr_count = _get_inst_count(cluster, "SOLR_SERVER")
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="SOLR_SERVER")
        if zk_count < 1:
            raise ex.RequiredServiceMissingException("ZOOKEEPER", required_by="SOLR_SERVER")

    s2s_count = _get_inst_count(cluster, "SQOOP_SERVER")
    if s2s_count > 1:
        raise ex.InvalidComponentCountException("SQOOP_SERVER", _("0 or 1"), s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="SQOOP_SERVER")
        if nm_count < 1:
            raise ex.RequiredServiceMissingException("YARN_NODEMANAGER", required_by="SQOOP_SERVER")
        if hs_count != 1:
            raise ex.RequiredServiceMissingException("YARN_JOBHISTORY", required_by="SQOOP_SERVER")

    lhbi_count = _get_inst_count(cluster, "HBASE_INDEXER")
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="HBASE_INDEXER")
        if zk_count < 1:
            raise ex.RequiredServiceMissingException("ZOOKEEPER", required_by="HBASE_INDEXER")
        if slr_count < 1:
            raise ex.RequiredServiceMissingException("SOLR_SERVER", required_by="HBASE_INDEXER")
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException("HBASE_MASTER", required_by="HBASE_INDEXER")

    ics_count = _get_inst_count(cluster, "IMPALA_CATALOGSERVER")
    iss_count = _get_inst_count(cluster, "IMPALA_STATESTORE")
    id_count = _get_inst_count(cluster, "IMPALAD")
    if ics_count > 1:
        raise ex.InvalidComponentCountException("IMPALA_CATALOGSERVER", _("0 or 1"), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException("IMPALA_STATESTORE", _("0 or 1"), iss_count)
    if ics_count == 1:
        datanode_ng = u.get_node_groups(cluster, "HDFS_DATANODE")
        impalad_ng = u.get_node_groups(cluster, "IMPALAD")
        datanodes = set(ng.id for ng in datanode_ng)
        impalads = set(ng.id for ng in impalad_ng)

        if datanodes != impalads:
            raise ex.InvalidClusterTopology(_("IMPALAD must be installed on every HDFS_DATANODE"))

        if iss_count != 1:
            raise ex.RequiredServiceMissingException("IMPALA_STATESTORE", required_by="IMPALA")
        if id_count < 1:
            raise ex.RequiredServiceMissingException("IMPALAD", required_by="IMPALA")
        if dn_count < 1:
            raise ex.RequiredServiceMissingException("HDFS_DATANODE", required_by="IMPALA")
        if hms_count < 1:
            raise ex.RequiredServiceMissingException("HIVE_METASTORE", required_by="IMPALA")

    kms_count = _get_inst_count(cluster, "KMS")
    if kms_count > 1:
        raise ex.InvalidComponentCountException("KMS", _("0 or 1"), kms_count)
Example #22
 def get_node_groups(self, node_process=None):
     return u.get_node_groups(self.cluster, node_process)
Example #23
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER',
                                                1, mng_count)

    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            'HDFS_DATANODE', replicas, dn_count,
            _('Number of datanodes must be not less than dfs_replication.'))

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count > 1:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
                                                _('0 or 1'), hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    if oo_count > 1:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException(
            'HIVE_SERVER2', required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException(
            'HIVE_METASTORE', required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException(
            'HIVE_METASTORE', required_by='HIVE_WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count > 1:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count > 1:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException(
            'OOZIE_SERVER', required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException(
            'HIVE_METASTORE', required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException(
                'HBASE_REGIONSERVER', _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='FLUME_AGENT')

    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count > 1:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SOLR_SERVER')

    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count > 1:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        datanode_ng = u.get_node_groups(cluster, "HDFS_DATANODE")
        impalad_ng = u.get_node_groups(cluster, "IMPALAD")
        datanodes = set(ng.id for ng in datanode_ng)
        impalads = set(ng.id for ng in impalad_ng)

        if datanodes != impalads:
            raise ex.InvalidClusterTopology(
                _("IMPALAD must be installed on every HDFS_DATANODE"))

        if iss_count != 1:
            raise ex.RequiredServiceMissingException(
                'IMPALA_STATESTORE', required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException(
                'IMPALAD', required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException(
                'HIVE_METASTORE', required_by='IMPALA')
Example #24
 def test_get_node_groups(self):
     self.assertEqual(u.get_node_groups(self.c1), self.c1.node_groups)
     self.assertEqual(u.get_node_groups(self.c1, "wrong-process"), [])
     self.assertEqual(u.get_node_groups(self.c1, 'dn'),
                      [self.ng2, self.ng3])
Example #25
 def get_node_groups(self, node_process=None):
     if node_process is not None:
         node_process = su.get_node_process_name(node_process)
     return u.get_node_groups(self.cluster, node_process)
Example #26
def _get_inst_count(cluster, process):
    return sum([ng.count for ng in u.get_node_groups(cluster, process)])
Example #27
 def test_get_node_groups(self):
     self.assertEqual(self.c1.node_groups, u.get_node_groups(self.c1))
     self.assertEqual([], u.get_node_groups(self.c1, "wrong-process"))
     self.assertEqual([self.ng2, self.ng3],
                      u.get_node_groups(self.c1, 'dn'))
Example #28
 def get_inst_count(cls, cluster, process):
     return sum([ng.count for ng in u.get_node_groups(cluster, process)])
Example #29
 def get_node_groups(self, node_process=None):
     return u.get_node_groups(self.cluster, node_process)