Example #1
    def get_service(self, node_process):
        # Resolve the UI-level service name for this node process; an
        # unknown process cannot belong to any service.
        ui_name = self.get_service_name_by_node_process(node_process)
        if ui_name is None:
            raise e.PluginInvalidDataException(
                _('Service not found in services list'))
        # A known name must still map to a concrete service instance
        # for the version the user chose.
        version = self.get_chosen_service_version(ui_name)
        service = self._find_service_instance(ui_name, version)
        if service is None:
            raise e.PluginInvalidDataException(_('Can not map service'))
        return service
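
Both failure branches above surface as PluginInvalidDataException, so a caller can treat an unknown process name and an unmappable version uniformly. A minimal calling sketch, using a hypothetical stand-in for sahara's exception class (the examples on this page import it under different aliases such as e, ex, pl_ex, and pex):

# Hypothetical stand-in for sahara's PluginInvalidDataException.
class PluginInvalidDataException(Exception):
    pass


def start_process(plugin, node_process):
    try:
        service = plugin.get_service(node_process)
    except PluginInvalidDataException as exc:
        # Both 'not found in services list' and 'can not map service'
        # land here, so one handler covers every lookup failure.
        print('skipping %s: %s' % (node_process, exc))
        return None
    return service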
Example #2
    def _create_config_obj(self, item, target='general', scope='cluster',
                           high_priority=False):
        def _prepare_value(value):
            if isinstance(value, str):
                return value.strip().lower()
            return value

        conf_name = _prepare_value(item.get('name', None))
        conf_value = _prepare_value(item.get('value', None))

        if not conf_name:
            raise ex.HadoopProvisionError(_("Config missing 'name'"))

        if conf_value is None:
            raise ex.PluginInvalidDataException(
                _("Config '%s' missing 'value'") % conf_name)

        if high_priority or item.get('priority', 2) == 1:
            priority = 1
        else:
            priority = 2

        return p.Config(
            name=conf_name,
            applicable_target=target,
            scope=scope,
            config_type=item.get('config_type', "string"),
            config_values=item.get('config_values', None),
            default_value=conf_value,
            is_optional=item.get('is_optional', True),
            description=item.get('description', None),
            priority=priority)
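
The inner _prepare_value helper normalizes string values before validation, and the priority logic collapses everything to 1 or 2. A small self-contained sketch of both rules (the helper body is copied from the example above; the priority loop is illustrative, not sahara code):

def _prepare_value(value):
    # Same normalization as the inner helper above: strings are
    # trimmed and lower-cased, everything else passes through.
    if isinstance(value, str):
        return value.strip().lower()
    return value


assert _prepare_value('  HDFS://Master:8020 ') == 'hdfs://master:8020'
assert _prepare_value(8020) == 8020

# Priority collapses to 1 only when forced by the caller or requested
# by the item itself; everything else becomes 2.
for high_priority, item_priority, expected in [(True, 2, 1),
                                               (False, 1, 1),
                                               (False, 2, 2)]:
    priority = 1 if high_priority or item_priority == 1 else 2
    assert priority == expected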
Example #3
    def validate_job_execution(self, cluster, job, data):
        if not self.edp_supported(cluster.hadoop_version):
            raise ex.PluginInvalidDataException(
                _('Storm {base} required to run {type} jobs').format(
                    base=EdpPyleusEngine.edp_base_version, type=job.type))

        super(EdpPyleusEngine, self).validate_job_execution(cluster, job, data)
Example #4
    def validate_job_execution(self, cluster, job, data):
        if (not self.edp_supported(cluster.hadoop_version)
                or not v_utils.get_spark_history_server(cluster)):
            raise ex.PluginInvalidDataException(
                _('Spark {base} or higher required to run {type} jobs').format(
                    base=EdpSparkEngine.edp_base_version, type=job.type))

        super(EdpSparkEngine, self).validate_job_execution(cluster, job, data)
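
Examples #3 and #4 gate validation on a minimum plugin version via edp_supported(). A plausible sketch of such a gate, assuming dotted numeric version strings; sahara's actual comparison logic may differ:

# Hypothetical version gate, assuming dotted numeric version strings.
def edp_supported(version, base_version):
    def as_tuple(v):
        return tuple(int(part) for part in v.split('.'))
    return as_tuple(version) >= as_tuple(base_version)


assert edp_supported('2.2', '1.6')
assert not edp_supported('1.3.1', '1.6')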
Example #5
    def validate_job_execution(self, cluster, job, data):
        if not self.edp_supported(cluster.hadoop_version):
            raise pl_ex.PluginInvalidDataException(
                _('Cloudera {base} or higher required to run {type} '
                  'jobs').format(base=self.edp_base_version, type=job.type))

        shs_count = u.get_instances_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
        if shs_count != 1:
            raise pl_ex.InvalidComponentCountException(
                'SPARK_YARN_HISTORY_SERVER', '1', shs_count)

        super(EdpSparkEngine, self).validate_job_execution(cluster, job, data)
Example #6
    def validate_job_execution(self, cluster, job, data):
        if not self.edp_supported(cluster.hadoop_version):
            raise pex.PluginInvalidDataException(
                _('Ambari plugin of {base} or higher required to run {type} '
                  'jobs').format(base=EDPSparkEngine.edp_base_version,
                                 type=job.type))

        spark_nodes_count = plugin_utils.get_instances_count(
            cluster, p_common.SPARK_JOBHISTORYSERVER)
        if spark_nodes_count != 1:
            raise pex.InvalidComponentCountException(
                p_common.SPARK_JOBHISTORYSERVER, '1', spark_nodes_count)

        super(EDPSparkEngine, self).validate_job_execution(cluster, job, data)
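
Examples #5 and #6 add a second gate on top of the version check: exactly one Spark history server must exist in the cluster. A minimal sketch of that counting check over stand-in cluster data; get_instances_count and the exception class here are simplified assumptions, not sahara's real implementations (which live in the modules aliased as u/plugin_utils and pl_ex/pex above):

# Hypothetical stand-ins for sahara's plugin utilities and exception.
class InvalidComponentCountException(Exception):
    def __init__(self, component, expected, actual):
        super().__init__('%s: expected %s instance(s), found %s'
                         % (component, expected, actual))


def get_instances_count(cluster, process):
    # Count node-group instances that run the given process.
    return sum(ng['count'] for ng in cluster['node_groups']
               if process in ng['processes'])


cluster = {'node_groups': [
    {'processes': ['SPARK_YARN_HISTORY_SERVER'], 'count': 1},
    {'processes': ['NODEMANAGER'], 'count': 3},
]}

shs_count = get_instances_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
if shs_count != 1:
    raise InvalidComponentCountException(
        'SPARK_YARN_HISTORY_SERVER', '1', shs_count)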