Example #1
def get_hive_password(cluster):
    cluster = conductor.cluster_get(context.ctx(), cluster)
    extra = cluster.extra.to_dict()
    if 'hive_pass_id' not in extra:
        extra['hive_pass_id'] = u.generate_random_password()
        conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
    return castellan.get_secret(extra['hive_pass_id'])
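
# A minimal, self-contained sketch of the same get-or-create pattern, with
# plain dicts standing in for the key manager and the cluster's extra field
# (the helper above goes through the conductor and castellan instead; names
# here are illustrative only):
import uuid

_key_store = {}   # stand-in for the key manager
extra = {}        # stand-in for cluster.extra


def get_or_create_password(extra, name):
    # Keep only the secret's ID in extra; the secret itself lives in the
    # key store, mirroring get_hive_password() above.
    if name not in extra:
        secret_id = str(uuid.uuid4())
        _key_store[secret_id] = str(uuid.uuid4())  # stand-in generated password
        extra[name] = secret_id
    return _key_store[extra[name]]


print(get_or_create_password(extra, 'hive_pass_id'))
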
Example #2
    def test_cleanup_configs(self):
        remote = mock.Mock()
        instance = mock.Mock()

        extra_conf = {'job_cleanup': {
            'valid': True,
            'script': 'script_text',
            'cron': 'cron_text'}}
        instance.node_group.node_processes = ["master"]
        instance.node_group.id = id
        cluster_dict = self._init_cluster_dict('2.2')

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        plugin._push_cleanup_job(remote, cluster, extra_conf, instance)
        remote.write_file_to.assert_called_with(
            '/etc/hadoop/tmp-cleanup.sh',
            'script_text')
        remote.execute_command.assert_called_with(
            'sudo sh -c \'echo "cron_text" > /etc/cron.d/spark-cleanup\'')

        remote.reset_mock()
        instance.node_group.node_processes = ["worker"]
        plugin._push_cleanup_job(remote, cluster, extra_conf, instance)
        self.assertFalse(remote.called)

        remote.reset_mock()
        instance.node_group.node_processes = ["master"]
        extra_conf['job_cleanup']['valid'] = False
        plugin._push_cleanup_job(remote, cluster, extra_conf, instance)
        remote.execute_command.assert_called_with(
            'sudo rm -f /etc/crond.d/spark-cleanup')
Example #3
    def _update_cluster_info(self, cluster_context):
        LOG.debug('Updating UI information.')
        info = {'Admin user credentials': {'Username': '******',
                                           'Password': pu.get_mapr_password
                                           (cluster_context.cluster)}}
        for service in cluster_context.cluster_services:
            for title, node_process, ui_info in (
                    service.get_ui_info(cluster_context)):
                removed = cluster_context.removed_instances(node_process)
                instances = cluster_context.get_instances(node_process)
                instances = [i for i in instances if i not in removed]

                if len(instances) == 1:
                    display_name_template = "%(title)s"
                else:
                    display_name_template = "%(title)s %(index)s"

                for index, instance in enumerate(instances, start=1):
                    args = {"title": title, "index": index}
                    display_name = display_name_template % args
                    data = ui_info.copy()
                    data[srvc.SERVICE_UI] = (data[srvc.SERVICE_UI] %
                                             instance.get_ip_or_dns_name())
                    info.update({display_name: data})

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster_context.cluster, {'info': info})
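
# The display-name templating above only adds an index when a node process
# runs on more than one instance; a small stand-alone illustration with
# made-up instance names:
instances = ['node-1', 'node-2']   # pretend two instances run the same process
title = 'NodeManager'
template = "%(title)s" if len(instances) == 1 else "%(title)s %(index)s"
for index, instance in enumerate(instances, start=1):
    print(instance, '->', template % {"title": title, "index": index})
# node-1 -> NodeManager 1
# node-2 -> NodeManager 2
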
Example #4
    def _set_cluster_info(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        info = {}

        if nn:
            address = utils.get_config_value_or_default(
                'HDFS', 'dfs.http.address', cluster)
            port = address[address.rfind(':') + 1:]
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), port)
            }
            info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()

        if sp_master:
            port = utils.get_config_value_or_default('Spark',
                                                     'Master webui port',
                                                     cluster)
            if port is not None:
                info['Spark'] = {
                    'Web UI':
                    'http://%s:%s' % (sp_master.get_ip_or_dns_name(), port)
                }
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
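
# The port lookup above simply takes everything after the last colon of the
# dfs.http.address value, e.g.:
address = '0.0.0.0:50070'   # a typical dfs.http.address default
port = address[address.rfind(':') + 1:]
print(port)   # 50070
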
Example #5
def _prepare_ranger(cluster):
    ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
    if not ranger:
        return
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    with ambari.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        sudo("ambari-server setup --jdbc-db=mysql "
             "--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
    init_db_template = (
        "create user 'root'@'%' identified by '{password}';\n"
        "set password for 'root'@'localhost' = password('{password}');")
    password = uuidutils.generate_uuid()
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ranger_db_password"] = password
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"extra": extra})
    with ranger.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        # TODO(sreshetnyak): add ubuntu support
        sudo("yum install -y mysql-server")
        sudo("service mysqld start")
        r.write_file_to("/tmp/init.sql",
                        init_db_template.format(password=password))
        sudo("mysql < /tmp/init.sql")
        sudo("rm /tmp/init.sql")
Example #6
    def setup_context(self, username="******", tenant_id="tenant_1",
                      auth_token="test_auth_token", tenant_name='test_tenant',
                      service_catalog=None, **kwargs):
        self.addCleanup(context.set_ctx,
                        context.ctx() if context.has_ctx() else None)

        context.set_ctx(context.PluginsContext(
            username=username, tenant_id=tenant_id,
            auth_token=auth_token, service_catalog=service_catalog or {},
            tenant_name=tenant_name, **kwargs))
Example #7
    def _set_cluster_info(self, cluster):
        info = self.cloudera_utils.get_cloudera_manager_info(cluster)
        hue = self.cloudera_utils.pu.get_hue(cluster)
        if hue:
            info['Hue Dashboard'] = {
                'Web UI': 'http://%s:8888' % hue.get_ip_or_dns_name()
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
Example #8
def update_default_ambari_password(cluster):
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    new_password = uuidutils.generate_uuid()
    with ambari_client.AmbariClient(ambari) as client:
        client.update_user_password("admin", "admin", new_password)
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ambari_password"] = new_password
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"extra": extra})
    cluster = conductor.cluster_get(ctx, cluster.id)
Example #9
def validate(cluster_id):
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    _check_ambari(cluster)
    _check_hdfs(cluster)
    _check_yarn(cluster)
    _check_oozie(cluster)
    _check_hive(cluster)
    _check_hbase(cluster)
    _check_spark(cluster)
    _check_ranger(cluster)
    _check_storm(cluster)
Example #10
    def test_plugin_edp_engine(self):
        cluster_dict = {
            'name': 'cluster',
            'plugin_name': 'cdh',
            'hadoop_version': '5.7.0',
            'default_image_id': 'image'}

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        self.assertIsInstance(
            plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK),
            edp.PluginsSparkJobEngine)
Example #11
    def test_validate(self, mock_utils):

        cluster_data = self._get_cluster('cluster', '1.1.0')
        cluster = conductor.cluster_create(context.ctx(), cluster_data)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)

        # number of nimbus nodes != 1 should raise an exception
        fake_ng = mock.Mock()
        fake_ng.count = 0
        mock_ng = mock.Mock(return_value=[fake_ng])
        mock_utils.get_node_groups = mock_ng

        self.assertRaises(ex.RequiredServiceMissingException, plugin.validate,
                          cluster)

        mock_ng.assert_called_once_with(cluster, "nimbus")

        fake_ng.count = 2
        self.assertRaises(ex.InvalidComponentCountException, plugin.validate,
                          cluster)

        mock_ng.assert_called_with(cluster, "nimbus")
        self.assertEqual(2, mock_ng.call_count)

        # no supervisor should raise an exception
        fake_nimbus = mock.Mock()
        fake_nimbus.count = 1

        fake_supervisor = mock.Mock()
        fake_supervisor.count = 0

        mock_ng = mock.Mock(side_effect=[[fake_nimbus], [fake_supervisor]])
        mock_utils.get_node_groups = mock_ng

        self.assertRaises(ex.InvalidComponentCountException, plugin.validate,
                          cluster)

        mock_ng.assert_any_call(cluster, "nimbus")
        mock_ng.assert_any_call(cluster, "supervisor")
        self.assertEqual(2, mock_ng.call_count)

        # one nimbus and one or more supervisors should not raise an exception
        fake_nimbus.count = 1
        fake_supervisor.count = 2

        mock_ng = mock.Mock(side_effect=[[fake_nimbus], [fake_supervisor]])
        mock_utils.get_node_groups = mock_ng

        plugin.validate(cluster)

        mock_ng.assert_any_call(cluster, "nimbus")
        mock_ng.assert_any_call(cluster, "supervisor")
        self.assertEqual(2, mock_ng.call_count)
Example #12
    def scale_cluster(self, cluster, instances):
        deploy.prepare_kerberos(cluster, instances)
        deploy.setup_agents(cluster, instances)
        cluster = conductor.cluster_get(context.ctx(), cluster.id)
        deploy.wait_host_registration(cluster, instances)
        deploy.resolve_package_conflicts(cluster, instances)
        deploy.add_new_hosts(cluster, instances)
        deploy.manage_config_groups(cluster, instances)
        deploy.manage_host_components(cluster, instances)
        deploy.configure_rack_awareness(cluster, instances)
        swift_helper.install_ssl_certs(instances)
        deploy.add_hadoop_swift_jar(instances)
        deploy.deploy_kerberos_principals(cluster, instances)
Example #13
    def configure_cluster(self, cluster):
        deploy.disable_repos(cluster)
        deploy.setup_ambari(cluster)
        deploy.setup_agents(cluster)
        deploy.wait_ambari_accessible(cluster)
        deploy.update_default_ambari_password(cluster)
        cluster = conductor.cluster_get(context.ctx(), cluster.id)
        deploy.wait_host_registration(cluster,
                                      plugin_utils.get_instances(cluster))
        deploy.prepare_kerberos(cluster)
        deploy.set_up_hdp_repos(cluster)
        deploy.resolve_package_conflicts(cluster)
        deploy.create_blueprint(cluster)
Example #14
    def _set_cluster_info(self, cluster):
        st_master = utils.get_instance(cluster, "nimbus")
        info = {}

        if st_master:
            port = "8080"

            info['Storm'] = {
                'Web UI':
                'http://%s:%s' % (st_master.get_ip_or_dns_name(), port)
            }
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
Example #15
    def test_edp_calls_hadoop2_create_dir(self, create_dir):
        for version in ['2.7.1']:
            cluster_dict = {
                'name': 'cluster' + version.replace('.', '_'),
                'plugin_name': 'vanilla',
                'hadoop_version': version,
                'default_image_id': 'image'}

            cluster = conductor.cluster_create(context.ctx(), cluster_dict)
            plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
            create_dir.reset_mock()
            plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir(
                mock.Mock(), '/tmp')
            self.assertEqual(1, create_dir.call_count)
Example #16
def delete_password_from_keymanager(cluster, pwname):
    """delete the named password from the key manager

    This function will lookup the named password in the cluster entry
    and delete it from the key manager.

    :param cluster: The cluster record containing the password
    :param pwname: The name associated with the password
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    key_id = cluster.extra.get(pwname) if cluster.extra else None
    if key_id is not None:
        key_manager.delete_key(key_id, ctx)
Example #17
    def _upload_job_files_to_hdfs(self,
                                  where,
                                  job_dir,
                                  job,
                                  configs,
                                  proxy_configs=None):
        mains = job.mains or []
        libs = job.libs or []
        builtin_libs = edp.get_builtin_binaries(job, configs)
        uploaded_paths = []
        hdfs_user = self.get_hdfs_user()
        lib_dir = job_dir + '/lib'

        with where.remote() as r:
            for m in mains:
                path = jb_manager.JOB_BINARIES. \
                    get_job_binary_by_url(m.url). \
                    copy_binary_to_cluster(m, proxy_configs=proxy_configs,
                                           remote=r, context=context.ctx())
                target = os.path.join(job_dir, m.name)
                mfs.copy_from_local(r, path, target, hdfs_user)
                uploaded_paths.append(target)
            if len(libs) > 0:
                self.create_hdfs_dir(r, lib_dir)
            for l in libs:
                path = jb_manager.JOB_BINARIES. \
                    get_job_binary_by_url(l.url). \
                    copy_binary_to_cluster(l, proxy_configs=proxy_configs,
                                           remote=r, context=context.ctx())
                target = os.path.join(lib_dir, l.name)
                mfs.copy_from_local(r, path, target, hdfs_user)
                uploaded_paths.append(target)
            for lib in builtin_libs:
                mfs.put_file_to_maprfs(r, lib['raw'], lib['name'], lib_dir,
                                       hdfs_user)
                uploaded_paths.append(lib_dir + '/' + lib['name'])
        return uploaded_paths
Example #18
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": get_host_group_components(cluster, procs)
            }
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version,
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }

    if kerberos.is_kerberos_security_enabled(cluster):
        bp['configurations'].extend([
            _serialize_mit_kdc_kerberos_env(cluster),
            _serialize_krb5_configs(cluster)
        ])
        bp['Blueprints']['security'] = {'type': 'KERBEROS'}

    general_configs = cluster.cluster_configs.get("general", {})
    if (general_configs.get(p_common.NAMENODE_HA)
            or general_configs.get(p_common.RESOURCEMANAGER_HA)
            or general_configs.get(p_common.HBASE_REGIONSERVER_HA)):
        bp = ha_helper.update_bp_ha_common(cluster, bp)

    if general_configs.get(p_common.NAMENODE_HA):
        bp = ha_helper.update_bp_for_namenode_ha(cluster, bp)

    if general_configs.get(p_common.RESOURCEMANAGER_HA):
        bp = ha_helper.update_bp_for_resourcemanager_ha(cluster, bp)

    if general_configs.get(p_common.HBASE_REGIONSERVER_HA):
        bp = ha_helper.update_bp_for_hbase_ha(cluster, bp)

    with _get_ambari_client(cluster) as client:
        return client.create_blueprint(cluster.name, bp)
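
# The blueprint handed to the Ambari client has roughly the shape below;
# the values are illustrative, only the keys mirror the code above.
import json

example_bp = {
    "Blueprints": {"stack_name": "HDP", "stack_version": "2.6"},
    "host_groups": [{
        "name": "master-instance-1",
        "configurations": [],
        "components": [{"name": "NAMENODE"}],
    }],
    "configurations": [],
}
print(json.dumps(example_bp, indent=2))
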
Example #19
    def test_validate_additional_ng_scaling_raises(self):
        data = [{
            'name': 'master',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['nimbus']
        }, {
            'name': 'slave',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['supervisor']
        }, {
            'name': 'zookeeper',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['zookeeper']
        }, {
            'name': 'master2',
            'flavor_id': '42',
            'count': 0,
            'node_processes': ['nimbus']
        }]

        cluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')
        cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')
        cluster_data_120 = self._get_cluster('cluster_1.2.0', '1.2')
        cluster_data_101['node_groups'] = data
        cluster_data_110['node_groups'] = data
        cluster_data_120['node_groups'] = data

        clusters = [cluster_data_101, cluster_data_110, cluster_data_120]

        for cluster_data in clusters:
            cluster = conductor.cluster_create(context.ctx(), cluster_data)
            plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
            master_id = [
                node.id for node in cluster.node_groups
                if node.name == 'master2'
            ]
            self.assertRaises(ex.NodeGroupCannotBeScaled,
                              plugin._validate_existing_ng_scaling, cluster,
                              master_id)
Example #20
    def _set_cluster_info(self, cluster):
        nn = vu.get_namenode(cluster)
        rm = vu.get_resourcemanager(cluster)
        hs = vu.get_historyserver(cluster)
        oo = vu.get_oozie(cluster)
        sp = vu.get_spark_history_server(cluster)
        info = {}

        if rm:
            info['YARN'] = {
                'Web UI':
                'http://%s:%s' % (rm.get_ip_or_dns_name(), '8088'),
                'ResourceManager':
                'http://%s:%s' % (rm.get_ip_or_dns_name(), '8032')
            }

        if nn:
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), '50070'),
                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
            }

        if oo:
            info['JobFlow'] = {
                'Oozie': 'http://%s:%s' % (oo.get_ip_or_dns_name(), '11000')
            }

        if hs:
            info['MapReduce JobHistory Server'] = {
                'Web UI': 'http://%s:%s' % (hs.get_ip_or_dns_name(), '19888')
            }

        if sp:
            info['Apache Spark'] = {
                'Spark UI':
                'http://%s:%s' % (sp.management_ip, '4040'),
                'Spark History Server UI':
                'http://%s:%s' % (sp.management_ip, '18080')
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
Example #21
    def test_plugin_edp_engine_no_spark(self):
        cluster_dict = {
            'name': 'cluster',
            'plugin_name': 'cdh',
            'hadoop_version': '5.7.0',
            'default_image_id': 'image'}

        job = mock.Mock()
        job.type = edp.JOB_TYPE_SPARK

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        edp_engine = plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK)
        with testtools.ExpectedException(
                ex.InvalidComponentCountException,
                value_re="Hadoop cluster should contain 1 "
                         "SPARK_YARN_HISTORY_SERVER component\(s\). Actual "
                         "SPARK_YARN_HISTORY_SERVER count is 0\nError ID: .*"):

            edp_engine.validate_job_execution(cluster, job, mock.Mock())
Example #22
def get_password_from_db(cluster, pwname):
    """return a password for the named entry

    This function will return, or create and return, a password for the
    named entry. It will store the password in the key manager and use
    the ID in the database entry.

    :param cluster: The cluster record containing the password
    :param pwname: The entry name associated with the password
    :returns: The cleartext password
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    passwd = cluster.extra.get(pwname) if cluster.extra else None
    if passwd:
        return key_manager.get_secret(passwd, ctx)

    passwd = uuidutils.generate_uuid()
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra[pwname] = key_manager.store_secret(passwd, ctx)
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return passwd
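
# A hedged usage sketch tying this helper to delete_password_from_keymanager
# from Example #16; the cluster object and the 'hive_pass_id' entry name are
# only illustrative, not taken from the code above.
passwd = get_password_from_db(cluster, 'hive_pass_id')
# ... use passwd while the cluster is alive ...
delete_password_from_keymanager(cluster, 'hive_pass_id')
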
Example #23
def provision_keypairs(cluster, instances=None):
    extra = cluster.extra.to_dict() if cluster.extra else {}
    # use same keypair for scaling
    keypair = extra.get('vanilla_keypair')
    if not instances:
        instances = utils.get_instances(cluster)
    else:
        # scaling
        if not keypair:
            # cluster created before mitaka, skipping provisioning
            return
    if not keypair:
        private, public = utils.generate_key_pair()
        keypair = {'public': public, 'private': private}
        extra['vanilla_keypair'] = keypair
        extra['vanilla_keypair']['private'] = _store_secret(keypair['private'])
        conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
    else:
        keypair['private'] = _get_secret(keypair['private'])
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn('provision-key-%s' % instance.instance_name,
                     _provision_key, instance, keypair)
Example #24
    def _test_engine(self, version, job_type, eng):
        cluster_dict = self._get_cluster('demo', version)

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        self.assertIsInstance(plugin.get_edp_engine(cluster, job_type), eng)
Example #25
    def configure_cluster(self, cluster):
        self.deploy.configure_cluster(cluster)
        conductor.cluster_update(
            context.ctx(), cluster, {
                'info':
                self.cloudera_utils.get_cloudera_manager_info(cluster)})
Example #26
    def _set_cluster_info(self, cluster):
        ambari_ip = plugin_utils.get_instance(
            cluster, p_common.AMBARI_SERVER).get_ip_or_dns_name()
        ambari_port = "8080"
        info = {
            p_common.AMBARI_SERVER: {
                "Web UI": "http://{host}:{port}".format(host=ambari_ip,
                                                        port=ambari_port),
                "Username": "******",
                "Password": cluster.extra["ambari_password"]
            }
        }
        nns = plugin_utils.get_instances(cluster, p_common.NAMENODE)
        info[p_common.NAMENODE] = {}
        for idx, namenode in enumerate(nns):
            info[p_common.NAMENODE][
                "Web UI %s" % (idx + 1)] = (
                "http://%s:50070" % namenode.get_ip_or_dns_name())

        rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER)
        info[p_common.RESOURCEMANAGER] = {}
        for idx, resourcemanager in enumerate(rms):
            info[p_common.RESOURCEMANAGER][
                "Web UI %s" % (idx + 1)] = (
                "http://%s:8088" % resourcemanager.get_ip_or_dns_name())

        historyserver = plugin_utils.get_instance(cluster,
                                                  p_common.HISTORYSERVER)
        if historyserver:
            info[p_common.HISTORYSERVER] = {
                "Web UI": "http://%s:19888" %
                          historyserver.get_ip_or_dns_name()
            }
        atlserver = plugin_utils.get_instance(cluster,
                                              p_common.APP_TIMELINE_SERVER)
        if atlserver:
            info[p_common.APP_TIMELINE_SERVER] = {
                "Web UI": "http://%s:8188" % atlserver.get_ip_or_dns_name()
            }
        oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
        if oozie:
            info[p_common.OOZIE_SERVER] = {
                "Web UI": "http://%s:11000/oozie" % oozie.get_ip_or_dns_name()
            }
        hbase_master = plugin_utils.get_instance(cluster,
                                                 p_common.HBASE_MASTER)
        if hbase_master:
            info[p_common.HBASE_MASTER] = {
                "Web UI": "http://%s:16010" % hbase_master.get_ip_or_dns_name()
            }
        falcon = plugin_utils.get_instance(cluster, p_common.FALCON_SERVER)
        if falcon:
            info[p_common.FALCON_SERVER] = {
                "Web UI": "http://%s:15000" % falcon.get_ip_or_dns_name()
            }
        storm_ui = plugin_utils.get_instance(cluster, p_common.STORM_UI_SERVER)
        if storm_ui:
            info[p_common.STORM_UI_SERVER] = {
                "Web UI": "http://%s:8744" % storm_ui.get_ip_or_dns_name()
            }
        ranger_admin = plugin_utils.get_instance(cluster,
                                                 p_common.RANGER_ADMIN)
        if ranger_admin:
            info[p_common.RANGER_ADMIN] = {
                "Web UI": "http://%s:6080" % ranger_admin.get_ip_or_dns_name(),
                "Username": "******",
                "Password": "******"
            }
        spark_hs = plugin_utils.get_instance(cluster,
                                             p_common.SPARK_JOBHISTORYSERVER)
        if spark_hs:
            info[p_common.SPARK_JOBHISTORYSERVER] = {
                "Web UI": "http://%s:18080" % spark_hs.get_ip_or_dns_name()
            }
        info.update(cluster.info.to_dict())
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {"info": info})
        cluster = conductor.cluster_get(ctx, cluster.id)