Example 1
0
    def _extract_configs_to_extra(self, cluster):
        """Collect per-node-group configuration artifacts for *cluster*.

        Returns a dict keyed by node-group id, each value holding the
        generated XML configs and setup script.  It also carries a
        cluster-wide ``hive_mysql_passwd`` entry (when Hive is deployed)
        and a ``topology_data`` entry (when data locality is enabled).
        """
        oozie = vu.get_oozie(cluster)
        hive = vu.get_hiveserver(cluster)

        extra = {}

        # Generate the Hive MySQL password only when a hiveserver
        # process is actually present in the cluster.
        hive_passwd = None
        if hive:
            hive_passwd = six.text_type(uuid.uuid4())
            extra['hive_mysql_passwd'] = hive_passwd

        for group in cluster.node_groups:
            xml_configs = c_helper.generate_xml_configs(
                cluster, group, hive_passwd)
            setup_script = c_helper.generate_setup_script(
                group.storage_paths(),
                c_helper.extract_environment_confs(group.configuration()),
                append_oozie=(oozie and oozie.node_group.id == group.id))
            extra[group.id] = {
                'xml': xml_configs,
                'setup_script': setup_script,
            }

        # The topology map feeds Hadoop's rack-awareness script.
        if c_helper.is_data_locality_enabled(cluster):
            topology = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            pairs = [key + " " + value for key, value in topology.items()]
            extra['topology_data'] = "\n".join(pairs) + "\n"

        return extra
Example 2
0
    def _extract_configs_to_extra(self, cluster):
        """Assemble the per-node-group 'extra' configuration mapping.

        The result maps each node-group id to its generated XML configs
        and setup script, plus cluster-wide entries for the Hive MySQL
        password (when Hive is deployed) and the topology map (when data
        locality is enabled).
        """
        oozie = vu.get_oozie(cluster)
        hive = vu.get_hiveserver(cluster)

        extra = dict()

        passwd = None
        if hive:
            # Password generation is delegated to a helper on this class.
            passwd = self._generate_hive_mysql_password(cluster)
            extra['hive_mysql_passwd'] = passwd

        for ng in cluster.node_groups:
            env_confs = c_helper.extract_environment_confs(ng.configuration())
            ng_entry = {
                'xml': c_helper.generate_xml_configs(cluster, ng, passwd),
                'setup_script': c_helper.generate_setup_script(
                    ng.storage_paths(), env_confs,
                    append_oozie=(oozie and oozie.node_group.id == ng.id)),
            }
            extra[ng.id] = ng_entry

        if c_helper.is_data_locality_enabled(cluster):
            topology = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                k + " " + v for k, v in topology.items()) + "\n"

        return extra
Example 3
0
    def _push_configs_to_existing_node(self, cluster, extra, instance):
        """Refresh configuration on an already-provisioned instance.

        Only instances running a master process (or any instance when
        data locality is enabled) have their files rewritten; all other
        instances are skipped.
        """
        node_processes = instance.node_group.node_processes
        master_processes = ('namenode', 'jobtracker', 'oozie', 'hiveserver')
        need_update = (
            c_helper.is_data_locality_enabled(cluster) or
            any(proc in node_processes for proc in master_processes))

        if not need_update:
            return

        with remote.get_remote(instance) as r:
            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)
Example 4
0
    def _push_configs_to_existing_node(self, cluster, extra, instance):
        """Push updated configs to *instance* when it hosts a master role.

        Instances without namenode/jobtracker/oozie/hiveserver processes
        are left untouched unless data locality is enabled cluster-wide.
        """
        node_processes = instance.node_group.node_processes

        # Bail out early for plain worker nodes with nothing to refresh.
        if not (c_helper.is_data_locality_enabled(cluster) or
                {'namenode', 'jobtracker',
                 'oozie', 'hiveserver'} & set(node_processes)):
            return

        with remote.get_remote(instance) as conn:
            self._write_topology_data(conn, cluster, extra)
            self._push_master_configs(conn, cluster, extra, instance)
Example 5
0
    def _push_configs_to_new_node(self, cluster, extra, instance):
        """Upload Hadoop config files, SSH keys and the init script to a
        freshly provisioned instance, then execute the init script.
        """
        ng_extra = extra[instance.node_group.id]
        private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/sahara-hadoop-init.sh': ng_extra['setup_script'],
            'id_rsa': private_key,
            'authorized_keys': public_key
        }

        # Install the cluster-wide SSH key pair for the hadoop user.
        key_cmd = ('sudo mkdir -p /home/hadoop/.ssh/ && '
                   'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && '
                   'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && '
                   'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}')

        with remote.get_remote(instance) as rmt:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            for chown_cmd in ('sudo chown -R $USER:$USER /etc/hadoop',
                              'sudo chown -R $USER:$USER /opt/oozie/conf'):
                rmt.execute_command(chown_cmd)

            rmt.write_files_to(files)
            rmt.execute_command('sudo chmod 0500 /tmp/sahara-hadoop-init.sh')
            # Log init-script output to a file for post-mortem debugging.
            rmt.execute_command('sudo /tmp/sahara-hadoop-init.sh '
                                '>> /tmp/sahara-hadoop-init.log 2>&1')

            rmt.execute_command(key_cmd)

            if c_helper.is_data_locality_enabled(cluster):
                rmt.write_file_to(
                    '/etc/hadoop/topology.sh',
                    f.get_file_text(
                        'plugins/vanilla/v1_2_1/resources/topology.sh'))
                rmt.execute_command('sudo chmod +x /etc/hadoop/topology.sh')

            self._write_topology_data(rmt, cluster, extra)
            self._push_master_configs(rmt, cluster, extra, instance)
Example 6
0
    def _push_configs_to_new_node(self, cluster, extra, instance):
        """Provision a brand-new instance with its Hadoop configuration.

        Writes the XML configs and setup script, installs the hadoop
        user's SSH key pair, runs the init script and, when data
        locality is enabled, deploys the topology script and data.
        """
        ng_extra = extra[instance.node_group.id]
        private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/sahara-hadoop-init.sh': ng_extra['setup_script'],
            'id_rsa': private_key,
            'authorized_keys': public_key
        }

        # Move the uploaded key material into the hadoop user's ~/.ssh
        # with the permissions sshd insists on.
        key_cmd = ' && '.join([
            'sudo mkdir -p /home/hadoop/.ssh/',
            'sudo mv id_rsa authorized_keys /home/hadoop/.ssh',
            'sudo chown -R hadoop:hadoop /home/hadoop/.ssh',
            'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}',
        ])

        with remote.get_remote(instance) as ssh:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            ssh.execute_command('sudo chown -R $USER:$USER /etc/hadoop')
            ssh.execute_command('sudo chown -R $USER:$USER /opt/oozie/conf')

            ssh.write_files_to(files)

            ssh.execute_command('sudo chmod 0500 /tmp/sahara-hadoop-init.sh')
            ssh.execute_command('sudo /tmp/sahara-hadoop-init.sh '
                                '>> /tmp/sahara-hadoop-init.log 2>&1')

            ssh.execute_command(key_cmd)

            if c_helper.is_data_locality_enabled(cluster):
                topology_script = f.get_file_text(
                    'plugins/vanilla/v1_2_1/resources/topology.sh')
                ssh.write_file_to('/etc/hadoop/topology.sh', topology_script)
                ssh.execute_command('sudo chmod +x /etc/hadoop/topology.sh')

            self._write_topology_data(ssh, cluster, extra)
            self._push_master_configs(ssh, cluster, extra, instance)
Example 7
0
    def _push_configs_to_new_node(self, cluster, extra, instance):
        """Set up a newly created instance: config files, SSH keys,
        init script and (optionally) the data-locality topology script.
        """
        ng_extra = extra[instance.node_group.id]
        private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

        xml = ng_extra["xml"]
        files = {
            "/etc/hadoop/core-site.xml": xml["core-site"],
            "/etc/hadoop/mapred-site.xml": xml["mapred-site"],
            "/etc/hadoop/hdfs-site.xml": xml["hdfs-site"],
            "/tmp/sahara-hadoop-init.sh": ng_extra["setup_script"],
            "id_rsa": private_key,
            "authorized_keys": public_key,
        }

        key_cmd = (
            "sudo mkdir -p /home/hadoop/.ssh/ && "
            "sudo mv id_rsa authorized_keys /home/hadoop/.ssh && "
            "sudo chown -R hadoop:hadoop /home/hadoop/.ssh && "
            "sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}"
        )

        with remote.get_remote(instance) as conn:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            conn.execute_command("sudo chown -R $USER:$USER /etc/hadoop")
            conn.execute_command("sudo chown -R $USER:$USER /opt/oozie/conf")
            conn.write_files_to(files)
            conn.execute_command("sudo chmod 0500 /tmp/sahara-hadoop-init.sh")
            # Run the init script, capturing all output for debugging.
            conn.execute_command(
                "sudo /tmp/sahara-hadoop-init.sh "
                ">> /tmp/sahara-hadoop-init.log 2>&1"
            )

            conn.execute_command(key_cmd)

            if c_helper.is_data_locality_enabled(cluster):
                script_body = f.get_file_text(
                    "plugins/vanilla/v1_2_1/resources/topology.sh"
                )
                conn.write_file_to("/etc/hadoop/topology.sh", script_body)
                conn.execute_command("sudo chmod +x /etc/hadoop/topology.sh")

            self._write_topology_data(conn, cluster, extra)
            self._push_master_configs(conn, cluster, extra, instance)
Example 8
0
 def _write_topology_data(self, r, cluster, extra):
     """Upload the topology map to the node when data locality is on."""
     if not c_helper.is_data_locality_enabled(cluster):
         return
     r.write_file_to('/etc/hadoop/topology.data', extra['topology_data'])
Example 9
0
 def _write_topology_data(self, r, cluster, extra):
     """Write topology.data to the instance if data locality is enabled."""
     if not c_helper.is_data_locality_enabled(cluster):
         return
     data = extra["topology_data"]
     r.write_file_to("/etc/hadoop/topology.data", data)