def start_spark_history_server(master):
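    """Start the Spark history server on the master node as the hadoop user."""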
    sp_home = config_helper.get_spark_home(master.cluster)
    with context.set_current_instance_id(master.instance_id):
        with master.remote() as r:
            r.execute_command(
                'sudo su - -c "bash %s" hadoop' %
                os.path.join(sp_home, 'sbin/start-history-server.sh'))
def start_zk_server(instances):
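    """Start ZooKeeper on every given instance, one thread per instance."""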
    utils.add_provisioning_step(instances[0].cluster_id,
                                utils.start_process_event_message("ZooKeeper"),
                                len(instances))

    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                tg.spawn('ZK-start-processes-%s' % instance.instance_name,
                         _start_zk_processes, instance, 'start')
def configure_instances(pctx, instances):
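    """Apply per-instance configuration, recording one provisioning step."""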
    if len(instances) == 0:
        return

    utils.add_provisioning_step(instances[0].cluster_id,
                                _("Configure instances"), len(instances))

    for instance in instances:
        with context.set_current_instance_id(instance.instance_id):
            _configure_instance(pctx, instance)
def _disable_repos_on_inst(instance):
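    """Disable OS package repositories by moving their definitions aside."""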
    with context.set_current_instance_id(instance_id=instance.instance_id):
        with instance.remote() as r:
            sudo = functools.partial(r.execute_command, run_as_root=True)
            if r.get_os_distrib() == "ubuntu":
                sudo("mv /etc/apt/sources.list /etc/apt/sources.list.tmp")
            else:
                tmp_name = "/tmp/yum.repos.d-%s" % instance.instance_id[:8]
                # Move the yum repo definitions aside and recreate an empty
                # /etc/yum.repos.d directory.
                sudo("mv /etc/yum.repos.d/ {fold_name}".format(
                    fold_name=tmp_name))
                sudo("mkdir /etc/yum.repos.d")
def refresh_zk_servers(cluster, to_delete_instances=None):
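    """Restart ZooKeeper servers, skipping instances scheduled for deletion."""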
    instances = vu.get_zk_servers(cluster)
    if to_delete_instances:
        for instance in to_delete_instances:
            if instance in instances:
                instances.remove(instance)

    utils.add_provisioning_step(cluster.id,
                                utils.start_process_event_message("ZooKeeper"),
                                len(instances))

    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                tg.spawn('ZK-restart-processes-%s' % instance.instance_name,
                         _start_zk_processes, instance, 'restart')
def _provision_key(instance, keypair):
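    """Install the keypair into /home/hadoop/.ssh for passwordless SSH."""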
    def append_to(remote, file, *args, **kwargs):
        kwargs['run_as_root'] = True
        path = "/home/hadoop/.ssh/%s" % file
        remote.append_to_file(path, *args, **kwargs)

    public, private = keypair['public'], keypair['private']
    folder = '/home/hadoop/.ssh'
    with context.set_current_instance_id(instance_id=instance.instance_id):
        with instance.remote() as r:
            r.execute_command('sudo mkdir -p %s' % folder)
            append_to(r, 'authorized_keys', public)
            append_to(r, 'id_rsa', private)
            append_to(r, 'id_rsa.pub', public)
            r.execute_command('sudo chown -R hadoop %s' % folder)
            r.execute_command("sudo chmod 600 %s/id_rsa" % folder)
        LOG.debug("Passwordless ssh enabled")
def start_dn_nm_processes(instances):
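    """Start DataNode/NodeManager daemons on the instances that host them."""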
    filternames = ['datanode', 'nodemanager']
    instances = utils.instances_with_services(instances, filternames)

    if len(instances) == 0:
        return

    utils.add_provisioning_step(
        instances[0].cluster_id,
        utils.start_process_event_message("DataNodes, NodeManagers"),
        len(instances))

    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                processes = set(instance.node_group.node_processes)
                processes = processes.intersection(filternames)
                tg.spawn('vanilla-start-processes-%s' % instance.instance_name,
                         _start_processes, instance, list(processes))
def start_oozie_process(pctx, instance):
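    """Start Oozie, creating its MySQL database when MySQL is enabled."""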
    with context.set_current_instance_id(instance.instance_id):
        with instance.remote() as r:
            if config_helper.is_mysql_enabled(pctx, instance.cluster):
                _start_mysql(r)
                LOG.debug("Creating Oozie DB Schema")
                sql_script = utils.get_file_text(
                    'plugins/vanilla/hadoop2/resources/create_oozie_db.sql',
                    'sahara_plugin_vanilla')

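                # Substitute the Oozie JDBC password into the DB schema script.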
                password = oozie_helper.get_oozie_mysql_configs(
                    instance.cluster)['oozie.service.JPAService.jdbc.password']
                sql_script = sql_script.replace("password", password)

                script_location = "create_oozie_db.sql"
                r.write_file_to(script_location, sql_script)
                r.execute_command('mysql -u root < %(script_location)s && '
                                  'rm %(script_location)s' %
                                  {"script_location": script_location})

            _oozie_share_lib(r)
            _start_oozie(r)
def start_hiveserver_process(pctx, instance):
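    """Set up Hive and start the metastore; create its MySQL DB if enabled."""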
    with context.set_current_instance_id(instance.instance_id):
        with instance.remote() as r:
            _hive_create_warehouse_dir(r)
            _hive_copy_shared_conf(r, edp.get_hive_shared_conf_path('hadoop'))

            if config_helper.is_mysql_enabled(pctx, instance.cluster):
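                # Start MySQL here only if Oozie is not co-located on this
                # instance.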
                oozie = vu.get_oozie(instance.node_group.cluster)
                if not oozie or instance.hostname() != oozie.hostname():
                    _start_mysql(r)

                version = instance.cluster.hadoop_version
                sql_script = utils.get_file_text(
                    'plugins/vanilla/v{}/resources/create_hive_db.sql'.format(
                        version.replace('.', '_')), 'sahara_plugin_vanilla')

                sql_script = sql_script.replace(
                    '{{password}}', u.get_hive_password(instance.cluster))
                r.write_file_to('/tmp/create_hive_db.sql', sql_script)
                _hive_create_db(r)
                _hive_metastore_start(r)
                LOG.info("Hive Metastore server at {host} has been "
                         "started".format(host=instance.hostname()))