Code example #1
0
    def _hdfs_ha_validation(cls, cluster):
        jn_count = cls.get_inst_count(cluster, 'HDFS_JOURNALNODE')
        zk_count = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        require_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
            cluster)

        if jn_count > 0:
            if jn_count < 3:
                raise ex.InvalidComponentCountException(
                    'HDFS_JOURNALNODE', _('not less than 3'), jn_count)
            if not jn_count % 2:
                raise ex.InvalidComponentCountException(
                    'HDFS_JOURNALNODE', _('be odd'), jn_count)
            if zk_count < 1:
                raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                         required_by='HDFS HA')
            if require_anti_affinity:
                if 'HDFS_SECONDARYNAMENODE' not in \
                        cls._get_anti_affinity(cluster):
                    raise ex.NameNodeHAConfigurationError(
                        _('HDFS_SECONDARYNAMENODE should be enabled '
                          'in anti_affinity.'))
                if 'HDFS_NAMENODE' not in cls._get_anti_affinity(cluster):
                    raise ex.NameNodeHAConfigurationError(
                        _('HDFS_NAMENODE should be enabled in anti_affinity.'))
Code example #2
0
File: versionhandler.py  Project: suriya2612/sahara
    def _hdfs_ha_update_host_component(self, hac, host, component, state):

        update_host_component_url = ('http://{0}/api/v1/clusters/{1}'
                                     '/hosts/{2}/host_components/{3}').format(
                                         hac['ambari_info'].get_address(),
                                         hac['name'], host, component)
        component_state = {"HostRoles": {"state": state}}
        body = json.dumps(component_state)

        result = self._put(update_host_component_url,
                           hac['ambari_info'], data=body)

        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(self._get_async_request_uri(
                hac['ambari_info'], hac['name'], request_id),
                hac['ambari_info'])
            if success:
                LOG.info(_LI("HDFS-HA: Host component updated successfully: "
                             "{0} {1}").format(host, component))
            else:
                LOG.critical(_LC("HDFS-HA: Host component update failed: "
                                 "{0} {1}").format(host, component))
                raise ex.NameNodeHAConfigurationError(
                    'Configuring HDFS HA failed. %s' % result.text)
        elif result.status_code != 200:
            LOG.error(
                _LE('Configuring HDFS HA failed. {0}').format(result.text))
            raise ex.NameNodeHAConfigurationError(
                'Configuring HDFS HA failed. %s' % result.text)
Code example #3
0
File: versionhandler.py  Project: suriya2612/sahara
    def _hdfs_ha_add_host_component(self, hac, host, component):
        add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
                                  '/hosts/{2}/host_components/{3}').format(
                                      hac['ambari_info'].get_address(),
                                      hac['name'], host, component)

        result = self._post(add_host_component_url, hac['ambari_info'])
        if result.status_code != 201:
            LOG.error(_LE('Configuring HDFS HA failed. %s'), result.text)
            raise ex.NameNodeHAConfigurationError(
                'Configuring HDFS HA failed. %s' % result.text)
Code example #4
0
File: versionhandler.py  Project: suriya2612/sahara
    def _hdfs_ha_get_config(self, hac, config_name, tag):

        config_url = ('http://{0}/api/v1/clusters/{1}'
                      '/configurations?type={2}&tag={3}').format(
                          hac['ambari_info'].get_address(), hac['name'],
                          config_name, tag)

        result = self._get(config_url, hac['ambari_info'])
        if result.status_code == 200:
            json_result = json.loads(result.text)
            items = json_result['items']
            return items[0]['properties']
        else:
            LOG.error(
                _LE('Configuring HDFS HA failed. {0}').format(result.text))
            raise ex.NameNodeHAConfigurationError(
                'Configuring HDFS HA failed. %s' % result.text)
Code example #5
0
File: versionhandler.py  Project: suriya2612/sahara
    def _hdfs_ha_put_config(self, hac, config_name, tag, properties):
        """Upload a new desired configuration version to Ambari.

        :param hac: HA context dict with 'ambari_info' and cluster 'name'
        :param config_name: configuration type (e.g. hdfs-site)
        :param tag: version tag for the new configuration
        :param properties: mapping of configuration properties to set
        :raises ex.NameNodeHAConfigurationError: on a non-200 response
        """
        config_url = ('http://{0}/api/v1/clusters/{1}').format(
            hac['ambari_info'].get_address(), hac['name'])

        # Ambari expects the new version under Clusters/desired_config.
        body = {
            'Clusters': {
                'desired_config': {
                    'type': config_name,
                    'tag': tag,
                    'properties': properties,
                },
            },
        }

        # Lazy %-args: the message is only rendered when debug logging
        # is actually enabled (was eagerly formatted with `%` before).
        LOG.debug("body: %s", body)

        result = self._put(config_url, hac['ambari_info'],
                           data=json.dumps(body))
        if result.status_code != 200:
            LOG.error(
                _LE('Configuring HDFS HA failed. {0}').format(result.text))
            raise ex.NameNodeHAConfigurationError(
                'Configuring HDFS HA failed. %s' % result.text)
Code example #6
0
File: validation.py  Project: uladz/sahara
def validate_cluster_creating(cluster):
    """Validate role counts and service dependencies for a new cluster.

    Walks every supported service, checking that each role's instance
    count is within its allowed range and that required companion
    services are present (e.g. Oozie needs datanodes, a nodemanager and
    a job history server).  Raises on the first violation found.

    :param cluster: the cluster object being created
    :raises ex.InvalidComponentCountException: a role count is out of range
    :raises ex.RequiredServiceMissingException: a dependency is absent
    :raises ex.NameNodeHAConfigurationError: HDFS HA anti-affinity issue
    :raises ex.ResourceManagerHAConfigurationError: RM HA anti-affinity issue
    :raises ex.InvalidClusterTopology: IMPALAD/datanode placement mismatch
    """
    # Cloudera Manager: exactly one instance is mandatory.
    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
    if mng_count != 1:
        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                mng_count)

    # HDFS: exactly one namenode and one secondary namenode.
    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')
    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
    if nn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
    if snn_count != 1:
        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                snn_count)

    # Datanodes must be able to satisfy the configured replication factor.
    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
    replicas = PU.get_config_value('HDFS', 'dfs_replication', cluster)
    if dn_count < replicas:
        raise ex.InvalidComponentCountException(
            'HDFS_DATANODE', replicas, dn_count,
            _('Number of datanodes must be not less than dfs_replication.'))

    # HDFS HA: requested when any journal nodes are present.  Requires an
    # odd number of journal nodes (at least 3), ZooKeeper, and -- when the
    # configuration demands it -- anti-affinity for both namenode roles.
    jn_count = _get_inst_count(cluster, 'HDFS_JOURNALNODE')
    require_anti_affinity = PU.c_helper.get_required_anti_affinity(cluster)
    if jn_count > 0:
        if jn_count < 3:
            raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
                                                    _('not less than 3'),
                                                    jn_count)
        if not jn_count % 2:
            raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
                                                    _('be odd'), jn_count)
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HDFS HA')
        if require_anti_affinity:
            if 'HDFS_SECONDARYNAMENODE' not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _('HDFS_SECONDARYNAMENODE should be enabled '
                      'in anti_affinity.'))
            if 'HDFS_NAMENODE' not in _get_anti_affinity(cluster):
                raise ex.NameNodeHAConfigurationError(
                    _('HDFS_NAMENODE should be enabled in anti_affinity.'))

    # YARN: at most one resourcemanager and one standby RM.  A standby RM
    # (RM HA) additionally requires an active RM, ZooKeeper, and
    # optionally anti-affinity for both RM roles.
    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
    if rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                _('0 or 1'), rm_count)

    stdb_rm_count = _get_inst_count(cluster, 'YARN_STANDBYRM')
    if stdb_rm_count > 1:
        raise ex.InvalidComponentCountException('YARN_STANDBYRM', _('0 or 1'),
                                                stdb_rm_count)
    if stdb_rm_count > 0:
        if rm_count < 1:
            raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                     required_by='RM HA')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='RM HA')
        if require_anti_affinity:
            if 'YARN_RESOURCEMANAGER' not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _('YARN_RESOURCEMANAGER should be enabled in '
                      'anti_affinity.'))
            if 'YARN_STANDBYRM' not in _get_anti_affinity(cluster):
                raise ex.ResourceManagerHAConfigurationError(
                    _('YARN_STANDBYRM should be enabled in anti_affinity.'))

    # Job history server: at most one; required whenever an RM exists.
    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
    if hs_count > 1:
        raise ex.InvalidComponentCountException('YARN_JOBHISTORY', _('0 or 1'),
                                                hs_count)

    if rm_count > 0 and hs_count < 1:
        raise ex.RequiredServiceMissingException(
            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

    # Nodemanagers are invalid without a resourcemanager.
    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
    if rm_count == 0:
        if nm_count > 0:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

    # Oozie: at most one server; needs datanodes, a nodemanager and a
    # job history server to run workflows.
    oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
    if oo_count > 1:
        raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                oo_count)

    if oo_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='OOZIE_SERVER')

        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')

        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

    # Hive: metastore and server2 require each other; webhcat requires
    # the metastore; the metastore also requires YARN.
    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

    if hms_count and rm_count < 1:
        raise ex.RequiredServiceMissingException('YARN_RESOURCEMANAGER',
                                                 required_by='HIVE_METASTORE')

    if hms_count and not hvs_count:
        raise ex.RequiredServiceMissingException('HIVE_SERVER2',
                                                 required_by='HIVE_METASTORE')

    if hvs_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_SERVER2')

    if whc_count and not hms_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HIVE_WEBHCAT')

    # Hue: at most one server; its Oozie/metastore dependencies are
    # checked below once those counts are known.
    hue_count = _get_inst_count(cluster, 'HUE_SERVER')
    if hue_count > 1:
        raise ex.InvalidComponentCountException('HUE_SERVER', _('0 or 1'),
                                                hue_count)

    # Spark history server: at most one, and only with a resourcemanager.
    shs_count = _get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
    if shs_count > 1:
        raise ex.InvalidComponentCountException('SPARK_YARN_HISTORY_SERVER',
                                                _('0 or 1'), shs_count)
    if shs_count and not rm_count:
        raise ex.RequiredServiceMissingException(
            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

    if oo_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('OOZIE_SERVER',
                                                 required_by='HUE_SERVER')

    if hms_count < 1 and hue_count:
        raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                 required_by='HUE_SERVER')

    # HBase: a master requires ZooKeeper and at least one regionserver;
    # regionservers without a master are invalid.
    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')

    if hbm_count >= 1:
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='HBASE')
        if hbr_count < 1:
            raise ex.InvalidComponentCountException('HBASE_REGIONSERVER',
                                                    _('at least 1'), hbr_count)
    elif hbr_count >= 1:
        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                _('at least 1'), hbm_count)

    # Flume agents need datanodes to write to.
    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
    if a_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='FLUME_AGENT')

    # Sentry: at most one server; needs datanodes and ZooKeeper.
    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
    if snt_count > 1:
        raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
                                                snt_count)
    if snt_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SENTRY_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='SENTRY_SERVER')

    # Solr servers need datanodes and ZooKeeper.
    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
    if slr_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='SOLR_SERVER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                     required_by='SOLR_SERVER')

    # Sqoop2: at most one server; needs datanodes, a nodemanager and a
    # job history server.
    s2s_count = _get_inst_count(cluster, 'SQOOP_SERVER')
    if s2s_count > 1:
        raise ex.InvalidComponentCountException('SQOOP_SERVER', _('0 or 1'),
                                                s2s_count)
    if s2s_count == 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='SQOOP_SERVER')
        if nm_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
        if hs_count != 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

    # HBase indexer: needs datanodes, ZooKeeper, Solr and an HBase master.
    lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
    if lhbi_count >= 1:
        if dn_count < 1:
            raise ex.RequiredServiceMissingException(
                'HDFS_DATANODE', required_by='HBASE_INDEXER')
        if zk_count < 1:
            raise ex.RequiredServiceMissingException(
                'ZOOKEEPER', required_by='HBASE_INDEXER')
        if slr_count < 1:
            raise ex.RequiredServiceMissingException(
                'SOLR_SERVER', required_by='HBASE_INDEXER')
        if hbm_count < 1:
            raise ex.RequiredServiceMissingException(
                'HBASE_MASTER', required_by='HBASE_INDEXER')

    # Impala: at most one catalog server and one statestore.  When a
    # catalog server is present, IMPALAD must run on exactly the set of
    # HDFS_DATANODE instances (empty symmetric difference), and the
    # statestore, impalads, datanodes and Hive metastore must all exist.
    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
    id_count = _get_inst_count(cluster, 'IMPALAD')
    if ics_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
                                                _('0 or 1'), ics_count)
    if iss_count > 1:
        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                _('0 or 1'), iss_count)
    if ics_count == 1:
        datanodes = set(u.get_instances(cluster, "HDFS_DATANODE"))
        impalads = set(u.get_instances(cluster, "IMPALAD"))
        if len(datanodes ^ impalads) > 0:
            raise ex.InvalidClusterTopology(
                _("IMPALAD must be installed on every HDFS_DATANODE"))

        if iss_count != 1:
            raise ex.RequiredServiceMissingException('IMPALA_STATESTORE',
                                                     required_by='IMPALA')
        if id_count < 1:
            raise ex.RequiredServiceMissingException('IMPALAD',
                                                     required_by='IMPALA')
        if dn_count < 1:
            raise ex.RequiredServiceMissingException('HDFS_DATANODE',
                                                     required_by='IMPALA')
        if hms_count < 1:
            raise ex.RequiredServiceMissingException('HIVE_METASTORE',
                                                     required_by='IMPALA')

    # KMS (key management server): at most one instance.
    kms_count = _get_inst_count(cluster, 'KMS')
    if kms_count > 1:
        raise ex.InvalidComponentCountException('KMS', _('0 or 1'), kms_count)