Code example #1
    def validate(self, cluster):
        # validate Storm Master Node and Storm Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "nimbus")])

        if sm_count < 1:
            raise ex.RequiredServiceMissingException("Storm nimbus")

        if sm_count >= 2:
            raise ex.InvalidComponentCountException("Storm nimbus", "1",
                                                    sm_count)

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "supervisor")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Storm supervisor",
                                                    _("1 or more"), sl_count)
Code example #2
def _check_hdfs(cluster):
    nn_count = utils.get_instances_count(cluster, common.NAMENODE)
    dn_count = utils.get_instances_count(cluster, common.DATANODE)

    if cluster.cluster_configs.get("general", {}).get(common.NAMENODE_HA):
        _check_zk_ha(cluster)
        _check_jn_ha(cluster)

        if nn_count != 2:
            raise ex.InvalidComponentCountException(common.NAMENODE, 2,
                                                    nn_count)
    else:
        if nn_count != 1:
            raise ex.InvalidComponentCountException(common.NAMENODE, 1,
                                                    nn_count)
    if dn_count == 0:
        raise ex.InvalidComponentCountException(
            common.DATANODE, _("1 or more"), dn_count)
Code example #3
File: plugin.py Project: uladz/sahara
    def validate(self, cluster):
        if cluster.hadoop_version == "1.0.0":
            raise exceptions.DeprecatedException(
                _("Support for Spark version 1.0.0 is now deprecated and will"
                  " be removed in the 2016.1 release."))

        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    dn_count)

        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode',
                _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s') % {
                      'dn': 'datanode',
                      'replication': 'dfs.replication'
                  })

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Code example #4
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        jt_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "jobtracker")])

        if jt_count not in [0, 1]:
            raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
                                                    jt_count)

        oozie_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "oozie")])

        if oozie_count not in [0, 1]:
            raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
                                                    oozie_count)

        hive_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "hiveserver")])
        if jt_count == 0:
            tt_count = sum([
                ng.count
                for ng in utils.get_node_groups(cluster, "tasktracker")
            ])
            if tt_count > 0:
                raise ex.RequiredServiceMissingException(
                    "jobtracker", required_by="tasktracker")

            if oozie_count > 0:
                raise ex.RequiredServiceMissingException("jobtracker",
                                                         required_by="oozie")

            if hive_count > 0:
                raise ex.RequiredServiceMissingException("jobtracker",
                                                         required_by="hive")

        if hive_count not in [0, 1]:
            raise ex.InvalidComponentCountException("hive", _('0 or 1'),
                                                    hive_count)
Code example #5
    def validate(self, cluster):
        nn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    dn_count)

        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                'datanode',
                _('%s or more') % rep_factor, dn_count,
                _('Number of %(dn)s instances should not be less '
                  'than %(replication)s') % {
                      'dn': 'datanode',
                      'replication': 'dfs.replication'
                  })

        # validate Spark Master Node and Spark Slaves
        sm_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count < 1:
            raise ex.RequiredServiceMissingException("Spark master")

        if sm_count >= 2:
            raise ex.InvalidComponentCountException("Spark master", "1",
                                                    sm_count)

        sl_count = sum(
            [ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave",
                                                    _("1 or more"), sl_count)
Code example #6
File: validation.py Project: xinw1012/sahara
def validate_cluster_creating(pctx, cluster):
    nn_count = _get_inst_count(cluster, 'namenode')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('namenode', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'secondarynamenode')
    if snn_count not in [0, 1]:
        raise ex.InvalidComponentCountException('secondarynamenode',
                                                _('0 or 1'), snn_count)

    rm_count = _get_inst_count(cluster, 'resourcemanager')
    if rm_count not in [0, 1]:
        raise ex.InvalidComponentCountException('resourcemanager', _('0 or 1'),
                                                rm_count)

    hs_count = _get_inst_count(cluster, 'historyserver')
    if hs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('historyserver', _('0 or 1'),
                                                hs_count)

    nm_count = _get_inst_count(cluster, 'nodemanager')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException('resourcemanager',
                                                     required_by='nodemanager')

    oo_count = _get_inst_count(cluster, 'oozie')
    dn_count = _get_inst_count(cluster, 'datanode')
    if oo_count not in [0, 1]:
        raise ex.InvalidComponentCountException('oozie', _('0 or 1'), oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('datanode',
                                                     required_by='oozie')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException('nodemanager',
                                                     required_by='oozie')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException('historyserver',
                                                     required_by='oozie')

    rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
    if dn_count < rep_factor:
        raise ex.InvalidComponentCountException(
            'datanode', rep_factor, dn_count,
            _('Number of datanodes must be '
              'not less than '
              'dfs.replication.'))

    hive_count = _get_inst_count(cluster, 'hiveserver')
    if hive_count not in [0, 1]:
        raise ex.InvalidComponentCountException('hive', _('0 or 1'),
                                                hive_count)
Code example #7
File: edp_engine.py Project: madar010/mad
    def validate_job_execution(self, cluster, job, data):
        if not self.edp_supported(cluster.hadoop_version):
            raise ex.InvalidDataException(
                _('Cloudera {base} or higher required to run {type} '
                  'jobs').format(base=self.edp_base_version, type=job.type))

        shs_count = u.get_instances_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
        if shs_count != 1:
            raise pl_ex.InvalidComponentCountException(
                'SPARK_YARN_HISTORY_SERVER', '1', shs_count)

        super(EdpSparkEngine, self).validate_job_execution(cluster, job, data)
Code example #8
File: validation.py Project: uladz/sahara
def _check_hbase(cluster):
    hm_count = utils.get_instances_count(cluster, common.HBASE_MASTER)
    hr_count = utils.get_instances_count(cluster, common.HBASE_REGIONSERVER)
    if hm_count > 1:
        raise ex.InvalidComponentCountException(common.HBASE_MASTER,
                                                _("0 or 1"), hm_count)
    if hm_count == 1 and hr_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HBASE_REGIONSERVER, required_by=common.HBASE_MASTER)
    if hr_count > 0 and hm_count == 0:
        raise ex.RequiredServiceMissingException(
            common.HBASE_MASTER, required_by=common.HBASE_REGIONSERVER)
Code example #9
File: edp_engine.py Project: madar010/mad
    def validate_job_execution(self, cluster, job, data):
        if not self.edp_supported(cluster.hadoop_version):
            raise exc.InvalidDataException(
                _('Ambari plugin of {base} or higher required to run {type} '
                  'jobs').format(base=EDPSparkEngine.edp_base_version,
                                 type=job.type))

        spark_nodes_count = plugin_utils.get_instances_count(
            cluster, p_common.SPARK_JOBHISTORYSERVER)
        if spark_nodes_count != 1:
            raise pex.InvalidComponentCountException(
                p_common.SPARK_JOBHISTORYSERVER, '1', spark_nodes_count)

        super(EDPSparkEngine, self).validate_job_execution(cluster, job, data)
Code example #10
    def _sentry_validation(cls, cluster):

        snt_count = cls.get_inst_count(cluster, 'SENTRY_SERVER')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        if snt_count > 1:
            raise ex.InvalidComponentCountException('SENTRY_SERVER',
                                                    _('0 or 1'), snt_count)
        if snt_count == 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='SENTRY_SERVER')
            if zk_count < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='SENTRY_SERVER')
Code example #11
    def _basic_validation(cls, cluster):

        mng_count = cls.get_inst_count(cluster, 'CLOUDERA_MANAGER')
        if mng_count != 1:
            raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                    mng_count)

        nn_count = cls.get_inst_count(cluster, 'HDFS_NAMENODE')
        if nn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1,
                                                    nn_count)

        snn_count = cls.get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
        if snn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE',
                                                    1, snn_count)
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        replicas = cls.PU.get_config_value('HDFS', 'dfs_replication', cluster)
        if dn_count < replicas:
            raise ex.InvalidComponentCountException(
                'HDFS_DATANODE', replicas, dn_count,
                _('Number of datanodes must be not'
                  ' less than dfs_replication.'))

        du_reserved = cls.PU.get_config_value('DATANODE',
                                              'dfs_datanode_du_reserved',
                                              cluster)
        du_reserved = du_reserved / 1073741824.
        for node_group in cluster.node_groups:
            volume_size = node_group.volumes_size
            if volume_size and volume_size < du_reserved:
                raise ex.InvalidVolumeSizeException(volume_size, du_reserved)

        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        if rm_count > 1:
            raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                    _('0 or 1'), rm_count)

        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        if hs_count > 1:
            raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
                                                    _('0 or 1'), hs_count)

        if rm_count > 0 and hs_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')
        if rm_count == 0:
            if nm_count > 0:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')
Code example #12
    def _sqoop_validation(cls, cluster):

        s2s_count = cls.get_inst_count(cluster, 'SQOOP_SERVER')
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')

        if s2s_count > 1:
            raise ex.InvalidComponentCountException('SQOOP_SERVER',
                                                    _('0 or 1'), s2s_count)
        if s2s_count == 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='SQOOP_SERVER')
            if nm_count < 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
            if hs_count != 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_JOBHISTORY', required_by='SQOOP_SERVER')
Code example #13
    def _oozie_validation(cls, cluster):

        oo_count = cls._get_inst_count(cluster, 'OOZIE_SERVER')
        dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
        nm_count = cls._get_inst_count(cluster, 'YARN_NODEMANAGER')
        hs_count = cls._get_inst_count(cluster, 'YARN_JOBHISTORY')

        if oo_count > 1:
            raise ex.InvalidComponentCountException('OOZIE_SERVER',
                                                    _('0 or 1'), oo_count)

        if oo_count == 1:
            if dn_count < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='OOZIE_SERVER')

            if nm_count < 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

            if hs_count != 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_JOBHISTORY', required_by='OOZIE_SERVER')
Code example #14
File: validation.py Project: stackhpc/sahara
    def _basic_validation(cls, cluster):

        mng_count = cls.get_inst_count(cluster, 'CLOUDERA_MANAGER')
        if mng_count != 1:
            raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                    mng_count)

        nn_count = cls.get_inst_count(cluster, 'HDFS_NAMENODE')
        if nn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1,
                                                    nn_count)

        snn_count = cls.get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
        if snn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE',
                                                    1, snn_count)
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        replicas = cls.PU.get_config_value('HDFS', 'dfs_replication', cluster)
        if dn_count < replicas:
            raise ex.InvalidComponentCountException(
                'HDFS_DATANODE', replicas, dn_count,
                _('Number of datanodes must be not'
                  ' less than dfs_replication.'))

        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        if rm_count > 1:
            raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                    _('0 or 1'), rm_count)

        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        if hs_count > 1:
            raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
                                                    _('0 or 1'), hs_count)

        if rm_count > 0 and hs_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')
        if rm_count == 0:
            if nm_count > 0:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')
Code example #15
File: validation.py Project: suriya2612/sahara
def validate_cluster_creating(cluster):
    if not cmu.have_cm_api_libs():
        LOG.error(
            _LE("For provisioning cluster with CDH plugin install"
                "'cm_api' package version 6.0.2 or later."))
        raise ex.HadoopProvisionError(_("'cm_api' is not installed."))

    mng_count = _get_inst_count(cluster, 'MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('MANAGER', 1, mng_count)

    nn_count = _get_inst_count(cluster, 'NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('SECONDARYNAMENODE', 1,
                                                snn_count)

    rm_count = _get_inst_count(cluster, 'RESOURCEMANAGER')
    if rm_count not in [0, 1]:
        raise ex.InvalidComponentCountException('RESOURCEMANAGER', _('0 or 1'),
                                                rm_count)

    hs_count = _get_inst_count(cluster, 'JOBHISTORY')
    if hs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException('JOBHISTORY',
                                                 required_by='RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException('RESOURCEMANAGER',
                                                     required_by='NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    dn_count = _get_inst_count(cluster, 'DATANODE')
    if oo_count not in [0, 1]:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVEMETASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVESERVER2')
    whc_count = _get_inst_count(cluster, 'WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('RESOURCEMANAGER',
                                                 required_by='HIVEMETASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVESERVER2',
                                                 required_by='HIVEMETASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVEMETASTORE',
                                                 required_by='HIVESERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVEMETASTORE',
                                                 required_by='WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count not in [0, 1]:
        raise ex.InvalidComponentCountException('HUE_SERVER', '0 or 1',
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                '0 or 1', shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVEMETASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'MASTER')
    hbr_count = _get_inst_count(cluster, 'REGIONSERVER')
    zk_count = _get_inst_count(cluster, 'SERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('MASTER', _('at least 1'),
                                                hbm_count)
Code example #16
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            'HDFS_DATANODE', replicas, dn_count,
            _('Number of datanodes must be not less than dfs_replication.'))

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count > 1:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    if oo_count > 1:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count > 1:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count > 1:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='FLUME_AGENT')

    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count > 1:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='SOLR_SERVER')

    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count > 1:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        if iss_count != 1:
            raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                     required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException('IMPALAD',
                                                     required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='IMPALA')

    kms_count = _get_inst_count(cluster, 'KMS')
    if kms_count > 1:
        raise ex.InvalidComponentCountException('KMS', _('0 or 1'), kms_count)
Code example #17
File: utils.py Project: madar010/mad
def get_instance(cluster, node_process):
    instances = get_instances(cluster, node_process)
    if len(instances) > 1:
        raise ex.InvalidComponentCountException(
            node_process, _('0 or 1'), len(instances))
    return instances[0] if instances else None
Code example #18
File: validation.py Project: uladz/sahara
def _check_spark(cluster):
    count = utils.get_instances_count(cluster, common.SPARK_JOBHISTORYSERVER)
    if count > 1:
        raise ex.InvalidComponentCountException(common.SPARK_JOBHISTORYSERVER,
                                                _("0 or 1"), count)
Code example #19
    def _kms_validation(cls, cluster):

        kms_count = cls.get_inst_count(cluster, 'KMS')
        if kms_count > 1:
            raise ex.InvalidComponentCountException('KMS', _('0 or 1'),
                                                    kms_count)
Code example #20
    def validate(self, cluster_spec, cluster):
        # check for a single HBASE_MASTER
        count = cluster_spec.get_deployed_node_group_count('HBASE_MASTER')
        if count != 1:
            raise ex.InvalidComponentCountException('HBASE_MASTER', 1, count)
Code example #21
    def validate(self, cluster_spec, cluster):
        # check for a single NAMENODE
        count = cluster_spec.get_deployed_node_group_count('NAMENODE')
        if count != 1:
            raise ex.InvalidComponentCountException('NAMENODE', 1, count)
Code example #22
File: validation.py Project: uladz/sahara
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')
    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            'HDFS_DATANODE', replicas, dn_count,
            _('Number of datanodes must be not less than dfs_replication.'))

    jn_count = _get_inst_count(cluster, 'HDFS_JOURNALNODE')
    require_anti_affinity = PU.c_helper.get_required_anti_affinity(cluster)
    if jn_count > 0:
        if jn_count < 3:
            raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
                                                    _('not less than 3'),
                                                    jn_count)
        if not jn_count % 2:
            raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
                                                    _('be odd'), jn_count)
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HDFS HA')
        if require_anti_affinity:
            if 'HDFS_SECONDARYNAMENODE' not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _('HDFS_SECONDARYNAMENODE should be enabled '
                      'in anti_affinity.'))
            if 'HDFS_NAMENODE' not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _('HDFS_NAMENODE should be enabled in anti_affinity.'))

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    stdb_rm_count = _get_inst_count(cluster, 'YARN_STANDBYRM')
    if stdb_rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_STANDBYRM', _('0 or 1'),
                                                stdb_rm_count)
    if stdb_rm_count > 0:
        if rm_count < 1:
            raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                     required_by='RM HA')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='RM HA')
        if require_anti_affinity:
            if 'YARN_RESOURCEMANAGER' not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _('YARN_RESOURCEMANAGER should be enabled in '
                      'anti_affinity.'))
            if 'YARN_STANDBYRM' not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _('YARN_STANDBYRM should be enabled in anti_affinity.'))

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count > 1:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    if oo_count > 1:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count > 1:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count > 1:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='FLUME_AGENT')

    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count > 1:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='SOLR_SERVER')

    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count > 1:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        datanodes = set(u.get_instances(cluster, "HDFS_DATANODE"))
        impalads = set(u.get_instances(cluster, "IMPALAD"))
        if len(datanodes ^ impalads) > 0:
            raise ex.InvalidClusterTopology(
                _("IMPALAD must be installed on every HDFS_DATANODE"))

        if iss_count != 1:
            raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                     required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException('IMPALAD',
                                                     required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='IMPALA')

    kms_count = _get_inst_count(cluster, 'KMS')
    if kms_count > 1:
        raise ex.InvalidComponentCountException('KMS', _('0 or 1'), kms_count)
Code example #23
def validate_cluster_creating(cluster):
    if not cmu.have_cm_api_libs():
        LOG.error(
            _LE("For provisioning cluster with CDH plugin install"
                "'cm_api' package version 6.0.2 or later."))
        raise ex.HadoopProvisionError(_("'cm_api' is not installed."))

    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count not in [0, 1]:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    if oo_count not in [0, 1]:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count not in [0, 1]:
        raise ex.InvalidComponentCountException('HUE_SERVER', '0 or 1',
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                '0 or 1', shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='FLUME_AGENT')

    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count not in [0, 1]:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='SOLR_SERVER')

    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count not in [0, 1]:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count not in [0, 1]:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count not in [0, 1]:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        if iss_count != 1:
            raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                     required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException('IMPALAD',
                                                     required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='IMPALA')
Code example #24
File: validation.py Project: redhat-openstack/sahara
def validate_cluster_creating(cluster):
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count not in [0, 1]:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    if oo_count not in [0, 1]:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='WEBHCAT')

    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count not in [0, 1]:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count not in [0, 1]:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)
Code example #25
File: validation.py Project: uladz/sahara
def _check_oozie(cluster):
    count = utils.get_instances_count(cluster, common.OOZIE_SERVER)
    if count > 1:
        raise ex.InvalidComponentCountException(common.OOZIE_SERVER,
                                                _("0 or 1"), count)
Code example #26
File: validation_utils.py Project: xinw1012/sahara
    def validate(cluster, component, count):
        c_info = ci.ClusterInfo(cluster, None)
        actual_count = c_info.get_instances_count(component)
        if actual_count != count:
            raise e.InvalidComponentCountException(component, count,
                                                   actual_count)
Code example #27
    def validate(self, cluster_spec, cluster):
        count = cluster_spec.get_deployed_node_group_count('ZOOKEEPER_SERVER')
        if count < 1:
            raise ex.InvalidComponentCountException('ZOOKEEPER_SERVER', '1+',
                                                    count)
Code example #28
    def validate(self, cluster_spec, cluster):
        count = cluster_spec.get_deployed_node_group_count('AMBARI_SERVER')
        if count != 1:
            raise ex.InvalidComponentCountException('AMBARI_SERVER', 1, count)
Code example #29
    def validate_job_execution(self, cluster, job, data):
        oo_count = u.get_instances_count(cluster, 'oozie')
        if oo_count != 1:
            raise ex.InvalidComponentCountException('oozie', '1', oo_count)

        super(EdpOozieEngine, self).validate_job_execution(cluster, job, data)
Code example #30
File: validation_utils.py Project: madar010/mad
    def validate(cluster_context, component, count):
        actual_count = cluster_context.get_instances_count(component)
        if actual_count != count:
            raise e.InvalidComponentCountException(component.ui_name, count,
                                                   actual_count)
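
Across these examples the same two checks recur: a component must be deployed exactly once, or at most once. A minimal sketch of those checks pulled into shared helpers; the helper names are hypothetical, and the imports assume the Sahara source tree the examples are drawn from:

from sahara.i18n import _
from sahara.plugins import exceptions as ex


def require_exact_count(component, expected, actual):
    # Raise when the deployed count differs from the expected count,
    # mirroring the InvalidComponentCountException usage above.
    if actual != expected:
        raise ex.InvalidComponentCountException(component, expected, actual)


def require_at_most_one(component, actual):
    # Raise when an optional (0-or-1) component is deployed more than once.
    if actual > 1:
        raise ex.InvalidComponentCountException(component, _('0 or 1'), actual)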