Ejemplo n.º 1
0
    def _impala_validation(cls, cluster):
        """Validate Impala process counts and placement for the cluster.

        Raises a plugin exception when the Impala service layout is
        invalid (bad counts, missing dependencies, or IMPALAD not
        co-located with every datanode).
        """
        catalog = cls.get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
        statestore = cls.get_inst_count(cluster, 'IMPALA_STATESTORE')
        impalad = cls.get_inst_count(cluster, 'IMPALAD')
        datanode = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        metastore = cls.get_inst_count(cluster, 'HIVE_METASTORE')

        # At most one catalog server and one statestore are allowed.
        if catalog > 1:
            raise ex.InvalidComponentCountException(
                'IMPALA_CATALOGSERVER', _('0 or 1'), catalog)
        if statestore > 1:
            raise ex.InvalidComponentCountException(
                'IMPALA_STATESTORE', _('0 or 1'), statestore)

        # A catalog server means Impala is enabled; verify the rest.
        if catalog == 1:
            dn_group_ids = {
                ng.id for ng in u.get_node_groups(cluster, "HDFS_DATANODE")}
            impalad_group_ids = {
                ng.id for ng in u.get_node_groups(cluster, "IMPALAD")}

            # IMPALAD must run on exactly the datanode node groups.
            if dn_group_ids != impalad_group_ids:
                raise ex.InvalidClusterTopology(
                    _("IMPALAD must be installed on every HDFS_DATANODE"))

            if statestore != 1:
                raise ex.RequiredServiceMissingException(
                    'IMPALA_STATESTORE', required_by='IMPALA')
            if impalad < 1:
                raise ex.RequiredServiceMissingException(
                    'IMPALAD', required_by='IMPALA')
            if datanode < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='IMPALA')
            if metastore < 1:
                raise ex.RequiredServiceMissingException(
                    'HIVE_METASTORE', required_by='IMPALA')
Ejemplo n.º 2
0
    def _hdfs_ha_validation(cls, cluster):
        """Validate the journal-node based HDFS HA configuration."""
        journal = cls.get_inst_count(cluster, 'HDFS_JOURNALNODE')
        zookeeper = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        need_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
            cluster)

        # Any journal node present implies HDFS HA was requested.
        if journal > 0:
            if journal < 3:
                raise ex.InvalidComponentCountException(
                    'HDFS_JOURNALNODE', _('not less than 3'), journal)
            # A quorum requires an odd number of journal nodes.
            if journal % 2 == 0:
                raise ex.InvalidComponentCountException(
                    'HDFS_JOURNALNODE', _('be odd'), journal)
            if zookeeper < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='HDFS HA')
            if need_anti_affinity:
                anti_affinity = cls._get_anti_affinity(cluster)
                if 'HDFS_SECONDARYNAMENODE' not in anti_affinity:
                    raise ex.NameNodeHAConfigurationError(
                        _('HDFS_SECONDARYNAMENODE should be enabled '
                          'in anti_affinity.'))
                if 'HDFS_NAMENODE' not in anti_affinity:
                    raise ex.NameNodeHAConfigurationError(
                        _('HDFS_NAMENODE should be enabled in anti_affinity.'))
Ejemplo n.º 3
0
    def _yarn_ha_validation(cls, cluster):
        """Validate the YARN ResourceManager HA configuration."""
        rm = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        zookeeper = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        standby = cls.get_inst_count(cluster, 'YARN_STANDBYRM')

        need_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
            cluster)

        if standby > 1:
            raise ex.InvalidComponentCountException(
                'YARN_STANDBYRM', _('0 or 1'), standby)

        # A standby RM implies RM HA; verify its prerequisites.
        if standby > 0:
            if rm < 1:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='RM HA')
            if zookeeper < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='RM HA')
            if need_anti_affinity:
                anti_affinity = cls._get_anti_affinity(cluster)
                if 'YARN_RESOURCEMANAGER' not in anti_affinity:
                    raise ex.ResourceManagerHAConfigurationError(
                        _('YARN_RESOURCEMANAGER should be enabled in '
                          'anti_affinity.'))
                if 'YARN_STANDBYRM' not in anti_affinity:
                    raise ex.ResourceManagerHAConfigurationError(
                        _('YARN_STANDBYRM should be'
                          ' enabled in anti_affinity.'))
Ejemplo n.º 4
0
    def _hue_validation(cls, cluster):
        """Validate Hue server prerequisites and related process counts."""
        hue = cls.get_inst_count(cluster, 'HUE_SERVER')
        if hue > 1:
            raise ex.InvalidComponentCountException(
                'HUE_SERVER', _('0 or 1'), hue)

        spark_hs = cls.get_inst_count(cluster, 'SPARK_YARN_HISTORY_SERVER')
        metastore = cls.get_inst_count(cluster, 'HIVE_METASTORE')
        oozie = cls.get_inst_count(cluster, 'OOZIE_SERVER')
        rm = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')

        # Spark history server is optional but requires a resource manager.
        if spark_hs > 1:
            raise ex.InvalidComponentCountException(
                'SPARK_YARN_HISTORY_SERVER', _('0 or 1'), spark_hs)
        if spark_hs and not rm:
            raise ex.RequiredServiceMissingException(
                'YARN_RESOURCEMANAGER',
                required_by='SPARK_YARN_HISTORY_SERVER')

        # Hue depends on both an Oozie server and a Hive metastore.
        if hue and oozie < 1:
            raise ex.RequiredServiceMissingException(
                'OOZIE_SERVER', required_by='HUE_SERVER')
        if hue and metastore < 1:
            raise ex.RequiredServiceMissingException(
                'HIVE_METASTORE', required_by='HUE_SERVER')
Ejemplo n.º 5
0
    def _basic_validation(cls, cluster):
        """Validate the mandatory core topology of a CDH cluster.

        Checks exactly-one counts for the manager and HDFS master
        processes, the datanode count against dfs_replication, volume
        sizes against the reserved datanode space, and the YARN layout.
        """
        mng_count = cls.get_inst_count(cluster, 'CLOUDERA_MANAGER')
        if mng_count != 1:
            raise ex.InvalidComponentCountException('CLOUDERA_MANAGER', 1,
                                                    mng_count)

        nn_count = cls.get_inst_count(cluster, 'HDFS_NAMENODE')
        if nn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1,
                                                    nn_count)

        snn_count = cls.get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
        if snn_count != 1:
            raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE',
                                                    1, snn_count)
        # HDFS cannot satisfy the replication factor with fewer datanodes.
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        replicas = cls.PU.get_config_value('HDFS', 'dfs_replication', cluster)
        if dn_count < replicas:
            raise ex.InvalidComponentCountException(
                'HDFS_DATANODE', replicas, dn_count,
                _('Number of datanodes must be not'
                  ' less than dfs_replication.'))

        # dfs_datanode_du_reserved is in bytes; convert to GB
        # (volumes_size is presumably expressed in GB — TODO confirm).
        du_reserved = cls.PU.get_config_value('DATANODE',
                                              'dfs_datanode_du_reserved',
                                              cluster)
        du_reserved = du_reserved / 1073741824.
        for node_group in cluster.node_groups:
            volume_size = node_group.volumes_size
            if volume_size and volume_size < du_reserved:
                raise ex.InvalidVolumeSizeException(volume_size, du_reserved)

        rm_count = cls.get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
        if rm_count > 1:
            raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
                                                    _('0 or 1'), rm_count)

        hs_count = cls.get_inst_count(cluster, 'YARN_JOBHISTORY')
        if hs_count > 1:
            raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
                                                    _('0 or 1'), hs_count)

        # A resource manager requires a job history server.
        if rm_count > 0 and hs_count < 1:
            raise ex.RequiredServiceMissingException(
                'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

        # Node managers are useless without a resource manager.
        nm_count = cls.get_inst_count(cluster, 'YARN_NODEMANAGER')
        if rm_count == 0:
            if nm_count > 0:
                raise ex.RequiredServiceMissingException(
                    'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')
Ejemplo n.º 6
0
    def get_service_by_role(self, role, cluster=None, instance=None):
        """Return the CM service object that owns the given role.

        :param role: CDH process/role name (e.g. 'NAMENODE').
        :param cluster: cluster to look the service up in.
        :param instance: alternatively, an instance whose cluster is used.
        :raises ValueError: if neither cluster nor instance is given, or
            the role is unknown.
        """
        if cluster:
            cm_cluster = self.get_cloudera_cluster(cluster)
        elif instance:
            cm_cluster = self.get_cloudera_cluster(instance.cluster)
        else:
            raise ValueError(_("'cluster' or 'instance' argument missed"))

        # Map each role to the attribute holding its service name;
        # resolved lazily via getattr so only the matched one is read.
        role_to_service_attr = {
            'NAMENODE': 'HDFS_SERVICE_NAME',
            'DATANODE': 'HDFS_SERVICE_NAME',
            'SECONDARYNAMENODE': 'HDFS_SERVICE_NAME',
            'HDFS_GATEWAY': 'HDFS_SERVICE_NAME',
            'JOURNALNODE': 'HDFS_SERVICE_NAME',
            'RESOURCEMANAGER': 'YARN_SERVICE_NAME',
            'NODEMANAGER': 'YARN_SERVICE_NAME',
            'JOBHISTORY': 'YARN_SERVICE_NAME',
            'YARN_GATEWAY': 'YARN_SERVICE_NAME',
            'YARN_STANDBYRM': 'YARN_SERVICE_NAME',
            'OOZIE_SERVER': 'OOZIE_SERVICE_NAME',
            'HIVESERVER2': 'HIVE_SERVICE_NAME',
            'HIVEMETASTORE': 'HIVE_SERVICE_NAME',
            'WEBHCAT': 'HIVE_SERVICE_NAME',
            'HUE_SERVER': 'HUE_SERVICE_NAME',
            'SPARK_YARN_HISTORY_SERVER': 'SPARK_SERVICE_NAME',
            'SERVER': 'ZOOKEEPER_SERVICE_NAME',
            'MASTER': 'HBASE_SERVICE_NAME',
            'REGIONSERVER': 'HBASE_SERVICE_NAME',
            'AGENT': 'FLUME_SERVICE_NAME',
            'SENTRY_SERVER': 'SENTRY_SERVICE_NAME',
            'SQOOP_SERVER': 'SQOOP_SERVICE_NAME',
            'SOLR_SERVER': 'SOLR_SERVICE_NAME',
            'HBASE_INDEXER': 'KS_INDEXER_SERVICE_NAME',
            'CATALOGSERVER': 'IMPALA_SERVICE_NAME',
            'STATESTORE': 'IMPALA_SERVICE_NAME',
            'IMPALAD': 'IMPALA_SERVICE_NAME',
            'LLAMA': 'IMPALA_SERVICE_NAME',
            'KMS': 'KMS_SERVICE_NAME',
            'KAFKA_BROKER': 'KAFKA_SERVICE_NAME',
        }
        if role not in role_to_service_attr:
            raise ValueError(
                _("Process %(process)s is not supported by CDH plugin") %
                {'process': role})
        service_name = getattr(self, role_to_service_attr[role])
        return cm_cluster.get_service(service_name)
Ejemplo n.º 7
0
def create_cluster(resource_root, name, version=None, fullVersion=None):
    """Create a cluster.

    :param resource_root: The root Resource object.
    :param name: Cluster name.
    :param version: Cluster CDH major version (eg: "CDH4") - the CDH minor
                    version will be assumed to be the latest released
                    version for CDH4, or 5.0 for CDH5.
    :param fullVersion: Cluster's full CDH version (eg: "5.1.1"). If
                        specified, 'version' is ignored. Since API v6.
    :return: An ApiCluster object.
    :raises CMApiVersionError: if neither version argument is given.
    """
    if version is None and fullVersion is None:
        raise ex.CMApiVersionError(
            _("Either 'version' or 'fullVersion' must be specified"))

    if fullVersion is None:
        api_version = 1
    else:
        # The full version takes precedence; it needs API v6 or later.
        api_version = 6
        version = None

    cluster = ApiCluster(resource_root, name, version, fullVersion)
    created = types.call(resource_root.post,
                         CLUSTERS_PATH,
                         ApiCluster,
                         True,
                         data=[cluster],
                         api_version=api_version)
    return created[0]
Ejemplo n.º 8
0
    def validate_additional_ng_scaling(cls, cluster, additional):
        """Check that every node group being added can be scaled.

        :param cluster: the cluster being scaled.
        :param additional: iterable of node group ids being added.
        :raises NodeGroupCannotBeScaled: on any unscalable node group.
        """
        rm = cls.PU.get_resourcemanager(cluster)
        scalable = cls._get_scalable_processes()

        for ng_id in additional:
            ng = u.get_by_id(cluster.node_groups, ng_id)
            # Every process in the group must be individually scalable.
            if not set(ng.node_processes).issubset(scalable):
                msg = _("CDH plugin cannot scale nodegroup with processes: "
                        "%(processes)s")
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, msg % {'processes': ' '.join(ng.node_processes)})

            # Node managers need a running resource manager in the cluster.
            if 'YARN_NODEMANAGER' in ng.node_processes and not rm:
                msg = _("CDH plugin cannot scale node group with processes "
                        "which have no master-processes run in cluster")
                raise ex.NodeGroupCannotBeScaled(ng.name, msg)
Ejemplo n.º 9
0
    def get(self, relpath=None, params=None):
        """Invoke the GET method on a resource

        Retries on timeouts up to ``self.retries`` times, sleeping
        ``self.retry_sleep`` seconds between attempts; non-timeout
        errors propagate immediately.

        :param relpath: Optional. A relative path to this resource's path.
        :param params: Key-value data.

        :return: A dictionary of the JSON result.
        """
        for retry in six.moves.xrange(self.retries + 1):
            if retry:
                # Not the first attempt: back off before retrying.
                context.sleep(self.retry_sleep)
            try:
                return self.invoke("GET", relpath, params)
            except (socket.error, urllib.error.URLError) as e:
                # Only swallow timeout errors; anything else re-raises.
                if "timed out" in six.text_type(e).lower():
                    if retry < self.retries:
                        LOG.warning("Timeout issuing GET request for "
                                    "{path}. Will retry".format(
                                        path=self._join_uri(relpath)))
                    else:
                        LOG.warning("Timeout issuing GET request for "
                                    "{path}. No retries left".format(
                                        path=self._join_uri(relpath)))
                else:
                    raise
        else:
            # for-else: reached only when every attempt timed out.
            raise ex.CMApiException(_("Get retry max time reached."))
Ejemplo n.º 10
0
 def _start_cloudera_manager(self, cluster, timeout_config):
     """Start the CM database and server, then wait for the manager."""
     manager = self.get_manager(cluster)
     with manager.remote() as r:
         cmd.start_cloudera_db(r)
         cmd.start_manager(r)
     # Poll every 2 seconds until the manager reports started or the
     # configured timeout expires.
     u.plugin_option_poll(
         cluster, self._check_cloudera_manager_started, timeout_config,
         _("Await starting Cloudera Manager"), 2, {'manager': manager})
Ejemplo n.º 11
0
 def _await_agents(self, cluster, instances, timeout_config):
     """Block until every Cloudera agent on *instances* has connected."""
     # instances is assumed non-empty (first element's cluster is used).
     api = self.get_api_client(instances[0].cluster)
     # Poll every 5 seconds until all agents connect or timeout expires.
     utils.plugin_option_poll(cluster,
                              self._agents_connected, timeout_config,
                              _("Await Cloudera agents"), 5, {
                                  'instances': instances,
                                  'api': api
                              })
Ejemplo n.º 12
0
 def _check_attr(self, name, allow_ro):
     """Look up *name* in the attribute map and enforce writability.

     :param name: attribute name to validate.
     :param allow_ro: when True, read-only attributes are acceptable.
     :return: the attribute descriptor (may be falsy).
     :raises CMApiAttributeError: on unknown or read-only attributes.
     """
     cls_name = reflection.get_class_name(self, fully_qualified=False)
     attributes = self._get_attributes()
     if name not in attributes:
         raise ex.CMApiAttributeError(
             _('Invalid property %(attname)s for class %(classname)s.') % {
                 'attname': name,
                 'classname': cls_name
             })
     attr = attributes[name]
     # A falsy descriptor carries no rw flag; treat it as writable.
     if attr and not attr.rw and not allow_ro:
         raise ex.CMApiAttributeError(
             _('Attribute %(attname)s of class %(classname)s '
               'is read only.') % {
                   'attname': name,
                   'classname': cls_name
               })
     return attr
Ejemplo n.º 13
0
 def configure_os(self, instances):
     """Configure package repositories on each instance in parallel."""
     # instances non-empty
     u.add_provisioning_step(
         instances[0].cluster_id, _("Configure OS"), len(instances))
     # One thread per instance; the group joins on context exit.
     with context.PluginsThreadGroup() as tg:
         for inst in instances:
             tg.spawn('cdh-repo-conf-%s' % inst.instance_name,
                      self._configure_repo_from_inst, inst)
Ejemplo n.º 14
0
    def _hbase_validation(cls, cluster):
        """Validate the HBase master/regionserver/ZooKeeper layout."""
        masters = cls.get_inst_count(cluster, 'HBASE_MASTER')
        regions = cls.get_inst_count(cluster, 'HBASE_REGIONSERVER')
        zookeepers = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        if masters > 1:
            raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                    _('0 or 1'), masters)
        if masters == 1:
            # HBase is enabled: it needs ZooKeeper and region servers.
            if zookeepers < 1:
                raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                         required_by='HBASE')
            if regions < 1:
                raise ex.InvalidComponentCountException(
                    'HBASE_REGIONSERVER', _('at least 1'), regions)
        elif regions >= 1:
            # Region servers without a master are invalid.
            raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                    _('at least 1'), masters)
Ejemplo n.º 15
0
    def install_packages(self, instances, packages):
        """Install *packages* on every instance in parallel."""
        # instances is assumed non-empty by callers.
        u.add_provisioning_step(
            instances[0].cluster_id, _("Install packages"), len(instances))

        # One installer thread per instance; joined on context exit.
        with context.PluginsThreadGroup() as tg:
            for inst in instances:
                thread_name = 'cdh-inst-pkgs-%s' % inst.instance_name
                tg.spawn(thread_name, self._install_pkgs, inst, packages)
Ejemplo n.º 16
0
 def update_configs(self, instances):
     """Push updated configs to each instance, staggering the spawns."""
     # instances non-empty
     utils.add_provisioning_step(instances[0].cluster_id,
                                 _("Update configs"), len(instances))
     with context.PluginsThreadGroup() as tg:
         for instance in instances:
             tg.spawn("update-configs-%s" % instance.instance_name,
                      self._update_configs, instance)
             # Sleep between spawns, presumably to avoid hitting the
             # manager with all instances at once — TODO confirm.
             context.sleep(1)
Ejemplo n.º 17
0
    def start_cloudera_agents(self, instances):
        """Start the Cloudera agent on every instance in parallel."""
        # instances is assumed non-empty by callers.
        u.add_provisioning_step(
            instances[0].cluster_id, _("Start Cloudera Agents"),
            len(instances))

        # One thread per instance; the group joins on context exit.
        with context.PluginsThreadGroup() as tg:
            for inst in instances:
                thread_name = 'cdh-agent-start-%s' % inst.instance_name
                tg.spawn(thread_name, self._start_cloudera_agent, inst)
Ejemplo n.º 18
0
    def configure_swift(self, cluster, instances=None):
        """Configure Swift support on cluster instances when enabled.

        :param cluster: cluster to configure.
        :param instances: optional subset of instances; defaults to all.
        """
        # No-op when Swift integration is disabled for the cluster.
        if not self.c_helper.is_swift_enabled(cluster):
            return

        if not instances:
            instances = u.get_instances(cluster)
        u.add_provisioning_step(
            cluster.id, _("Configure Swift"), len(instances))

        with context.PluginsThreadGroup() as tg:
            for inst in instances:
                tg.spawn('cdh-swift-conf-%s' % inst.instance_name,
                         self._configure_swift_to_inst, inst)
        swift_helper.install_ssl_certs(instances)
Ejemplo n.º 19
0
def install_packages(remote, packages, timeout=1800):
    """Install OS packages on a remote host as root.

    :param remote: remote connection to run the command over.
    :param packages: iterable of package names.
    :param timeout: command timeout in seconds.
    :raises HadoopProvisionError: on an unsupported OS distribution.
    """
    distrib = _get_os_distrib(remote)
    # Per-distribution install command templates.
    templates = {
        'ubuntu': 'RUNLEVEL=1 apt-get install -y %s',
        'centos': 'yum install -y %s',
    }
    if distrib not in templates:
        raise ex.HadoopProvisionError(
            _("OS on image is not supported by CDH plugin"))

    command = templates[distrib] % ' '.join(packages)
    _root(remote, command, timeout=timeout)
Ejemplo n.º 20
0
def check_api_version(resource_root, min_version):
    """Check API version

    Raise CMApiVersionError unless the resource_root's API version is at
    least *min_version*.
    """
    actual = resource_root.version
    if actual < min_version:
        raise ex.CMApiVersionError(
            _("API version %(minv)s is required but %(acv)s is in use.") % {
                'minv': min_version,
                'acv': actual,
            })
Ejemplo n.º 21
0
class CMApiValueError(e.SaharaPluginException):
    """Exception indicating a CM API value error.

    A message indicating the reason for failure must be provided.
    """

    # %s is filled with the caller-supplied reason.
    base_message = _("CM API value error: %s")

    def __init__(self, message):
        # Machine-readable code consumed by Sahara error reporting.
        self.code = "CM_API_VALUE_ERROR"
        self.message = self.base_message % message

        super(CMApiValueError, self).__init__()
Ejemplo n.º 22
0
class CMApiVersionError(e.SaharaPluginException):
    """Exception indicating that CM API Version does not meet requirement.

    A message indicating the reason for failure must be provided.
    """

    # %s is filled with the caller-supplied reason.
    base_message = _("CM API version not meet requirement: %s")

    def __init__(self, message):
        # Machine-readable code consumed by Sahara error reporting.
        self.code = "CM_API_VERSION_ERROR"
        self.message = self.base_message % message

        super(CMApiVersionError, self).__init__()
Ejemplo n.º 23
0
class CMApiException(e.SaharaPluginException):
    """Exception Type from CM API Errors.

    Any error result from the CM API is converted into this exception type.
    This handles errors from the HTTP level as well as the API level.
    """

    # %s is filled with the caller-supplied reason.
    base_message = _("CM API error: %s")

    def __init__(self, message):
        # Machine-readable code consumed by Sahara error reporting.
        self.code = "CM_API_EXCEPTION"
        self.message = self.base_message % message

        super(CMApiException, self).__init__()
Ejemplo n.º 24
0
    def validate_job_execution(self, cluster, job, data):
        """Validate that the cluster can run the given Spark-on-YARN job.

        :param cluster: cluster the job would run on.
        :param job: job object; its type is used in error reporting.
        :param data: execution data passed through to the parent validator.
        :raises PluginInvalidDataException: when the plugin version does
            not support EDP for this job type.
        :raises InvalidComponentCountException: when the cluster does not
            have exactly one SPARK_YARN_HISTORY_SERVER.
        """
        if not self.edp_supported(cluster.hadoop_version):
            # Fixed: the original implicit string concatenation was missing
            # a space, producing e.g. "...required to run Sparkjobs".
            raise pl_ex.PluginInvalidDataException(
                _('Cloudera {base} or higher required to run {type} '
                  'jobs').format(base=self.edp_base_version, type=job.type))

        shs_count = u.get_instances_count(
            cluster, 'SPARK_YARN_HISTORY_SERVER')
        if shs_count != 1:
            raise pl_ex.InvalidComponentCountException(
                'SPARK_YARN_HISTORY_SERVER', '1', shs_count)

        super(EdpSparkEngine, self).validate_job_execution(
            cluster, job, data)
Ejemplo n.º 25
0
    def validate_existing_ng_scaling(cls, cluster, existing):
        """Validate resizing of existing node groups.

        :param cluster: cluster being scaled.
        :param existing: dict mapping node group id to its new count.
        :raises NodeGroupCannotBeScaled: if a group contains unscalable
            processes.
        :raises ClusterCannotBeScaled: if the resulting datanode count
            would drop below dfs_replication.
        """
        scalable_processes = cls._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                # Shrinking a group that hosts datanodes removes datanodes.
                if (ng.count > existing[ng.id]
                        and "HDFS_DATANODE" in ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]

                if not set(ng.node_processes).issubset(scalable_processes):
                    msg = _("CDH plugin cannot scale nodegroup"
                            " with processes: %(processes)s")
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name,
                        msg % {'processes': ' '.join(ng.node_processes)})

        # Remaining datanodes must still satisfy the replication factor.
        dn_count = cls.get_inst_count(cluster, 'HDFS_DATANODE') - dn_to_delete
        replicas = cls.PU.get_config_value('HDFS', 'dfs_replication', cluster)
        if dn_count < replicas:
            raise ex.ClusterCannotBeScaled(
                cluster,
                _('Number of datanodes must be not'
                  ' less than dfs_replication.'))
Ejemplo n.º 26
0
 def _get_config_value(self, service, name, configs, cluster=None):
     """Resolve a config value, preferring cluster-level overrides.

     Lookup order: cluster-level configs, then each node group's
     configs, then the default value from *configs*.

     :param service: applicable target (service) name.
     :param name: config name.
     :param configs: iterable of config objects carrying defaults.
     :param cluster: optional cluster whose overrides take precedence.
     :raises InvalidDataException: when the config cannot be found.
     """
     if cluster:
         conf = cluster.cluster_configs
         if service in conf and name in conf[service]:
             return u.transform_to_num(conf[service][name])
         for node_group in cluster.node_groups:
             conf = node_group.node_configs
             if service in conf and name in conf[service]:
                 return u.transform_to_num(conf[service][name])
     # Fall back to the declared default value.
     for config in configs:
         if config.applicable_target == service and config.name == name:
             return u.transform_to_num(config.default_value)
     raise exc.InvalidDataException(
         _("Unable to find config: applicable_target: {target}, name: "
           "{name}").format(target=service, name=name))
Ejemplo n.º 27
0
    def _update(self, api_obj):
        """Copy attribute state from *api_obj* onto this object.

        :param api_obj: source object; must be of this object's class or
            a superclass of it.
        :raises CMApiValueError: on a class mismatch.
        """
        if not isinstance(self, api_obj.__class__):
            raise ex.CMApiValueError(
                _("Class %(class1)s does not derive from %(class2)s; "
                  "cannot update attributes.") % {
                      'class1': self.__class__,
                      'class2': api_obj.__class__
                  })

        # Copy every known attribute; silently skip ones the source
        # object does not define (or that cannot be assigned).
        for name in self._get_attributes():
            try:
                setattr(self, name, getattr(api_obj, name))
            except AttributeError:
                pass
Ejemplo n.º 28
0
    def _sentry_validation(cls, cluster):
        """Validate Sentry server count and its service prerequisites."""
        sentry = cls.get_inst_count(cluster, 'SENTRY_SERVER')
        datanodes = cls.get_inst_count(cluster, 'HDFS_DATANODE')
        zookeepers = cls.get_inst_count(cluster, 'ZOOKEEPER_SERVER')

        if sentry > 1:
            raise ex.InvalidComponentCountException('SENTRY_SERVER',
                                                    _('0 or 1'), sentry)
        # Sentry enabled: it requires HDFS datanodes and ZooKeeper.
        if sentry == 1:
            if datanodes < 1:
                raise ex.RequiredServiceMissingException(
                    'HDFS_DATANODE', required_by='SENTRY_SERVER')
            if zookeepers < 1:
                raise ex.RequiredServiceMissingException(
                    'ZOOKEEPER', required_by='SENTRY_SERVER')
Ejemplo n.º 29
0
    def _require_min_api_version(self, version):
        """Check minimum version requirement

        Raise an exception if the version of the api is less than the given
        version.

        :param version: The minimum required version.
        :raises CMApiVersionError: when the active API version is too old.
        """
        actual_version = self._get_resource_root().version
        # Never require less than what this resource itself needs.
        version = max(version, self._api_version())
        if actual_version < version:
            raise ex.CMApiVersionError(
                _("API version %(minv)s is required but %(acv)s is in use.") %
                {
                    'minv': version,
                    'acv': actual_version
                })
    def test_start_cloudera_manager(self, start_manager, start_cloudera_db,
                                    plugin_option_poll, log_cfg):
        # NOTE(review): the extra arguments are presumably mocks injected
        # by mock.patch decorators defined outside this view — confirm.
        cluster = get_concrete_cluster()
        manager = cluster.node_groups[0].instances[0]

        self.plug_utils.start_cloudera_manager(cluster)
        # The DB and the manager must each be started exactly once over
        # the manager instance's remote connection.
        with manager.remote() as r:
            start_manager.assert_called_once_with(r)
            start_cloudera_db.assert_called_once_with(r)

        # The poll helper must be invoked with the manager-start arguments.
        call = [cluster,
                self.plug_utils._check_cloudera_manager_started,
                self.plug_utils.c_helper.AWAIT_MANAGER_STARTING_TIMEOUT,
                _("Await starting Cloudera Manager"),
                2, {'manager': manager}]
        plugin_option_poll.assert_called_once_with(*call)