Exemple #1
0
    def test_load_template_with_anti_affinity_single_ng(self):
        """Check the rendered Heat cluster template when Neutron is on and
           the anti-affinity feature is enabled for a single node process
           in a single node group.
        """

        master_ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                                    floating_ip_pool='floating',
                                    image_id=None, volumes_per_node=0,
                                    volumes_size=0, id=1)
        worker_ng = tu.make_ng_dict('worker', 42, ['datanode'], 2,
                                    floating_ip_pool='floating',
                                    image_id=None, volumes_per_node=0,
                                    volumes_size=0, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general", "1.2.1",
                                    [master_ng, worker_ng],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1',
                                    anti_affinity=['datanode'], image_id=None)

        template = h.ClusterTemplate(cluster)
        template.add_node_group_extra(master_ng['id'], 1, 'line1\nline2')
        template.add_node_group_extra(worker_ng['id'], 2, 'line2\nline3')

        self.override_config("use_neutron", True)
        rendered = h._load_template(
            'main.heat',
            {'resources': template._serialize_resources()})

        expected = f.get_file_text(
            "tests/unit/resources/"
            "test_serialize_resources_aa.heat")
        self.assertEqual(json.loads(rendered), json.loads(expected))
Exemple #2
0
    def test_load_template_use_neutron(self):
        """Check the rendered Heat cluster template with Neutron enabled.

           Two NodeGroups are used: 'master' with an Ephemeral drive
           attached and 'worker' with 2 attached volumes of 10GB each.
        """

        master_ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                                    floating_ip_pool='floating',
                                    image_id=None, volumes_per_node=0,
                                    volumes_size=0, id=1)
        worker_ng = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                                    floating_ip_pool='floating',
                                    image_id=None, volumes_per_node=2,
                                    volumes_size=10, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general", "1.2.1",
                                    [master_ng, worker_ng],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1', anti_affinity=[],
                                    image_id=None)

        template = h.ClusterTemplate(cluster)
        template.add_node_group_extra(master_ng['id'], 1, 'line1\nline2')
        template.add_node_group_extra(worker_ng['id'], 1, 'line2\nline3')

        self.override_config("use_neutron", True)
        rendered = h._load_template(
            'main.heat',
            {'resources': template._serialize_resources()})

        expected = f.get_file_text(
            "tests/unit/resources/"
            "test_serialize_resources_use_neutron.heat")
        self.assertEqual(json.loads(rendered), json.loads(expected))
Exemple #3
0
    def test_load_template_use_neutron(self):
        """Check the rendered Heat cluster template with Neutron enabled.

           Two NodeGroups are used: 'master' with an Ephemeral drive
           attached and 'worker' with 2 attached volumes of 10GB each.
        """

        master_ng = tu._make_ng_dict('master', 42, ['namenode'], 1,
                                     floating_ip_pool='floating',
                                     image_id=None, volumes_per_node=0,
                                     volumes_size=0, id=1)
        worker_ng = tu._make_ng_dict('worker', 42, ['datanode'], 1,
                                     floating_ip_pool='floating',
                                     image_id=None, volumes_per_node=2,
                                     volumes_size=10, id=2)
        cluster = tu._create_cluster("cluster", "tenant1", "general", "1.2.1",
                                     [master_ng, worker_ng],
                                     user_keypair_id='user_key',
                                     neutron_management_network='private_net',
                                     default_image_id='1', anti_affinity=[],
                                     image_id=None)

        template = h.ClusterTemplate(cluster)
        template.add_node_group_extra(master_ng['id'], 1, 'line1\nline2')
        template.add_node_group_extra(worker_ng['id'], 1, 'line2\nline3')

        h.CONF.set_override("use_neutron", True)
        try:
            rendered = h._load_template(
                'main.heat',
                {'resources': template._serialize_resources()})

            expected = f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_use_neutron.heat")
            self.assertEqual(json.loads(rendered), json.loads(expected))
        finally:
            # Always drop the override so the global config does not leak
            # into other tests.
            h.CONF.clear_override("use_neutron")
Exemple #4
0
    def test_load_template_with_anti_affinity_single_ng(self):
        """This test checks Heat cluster template with Neutron enabled
           and anti-affinity feature enabled for single node process
           in single node group.
        """

        ng1 = tu._make_ng_dict('master', 42, ['namenode'], 1,
                               floating_ip_pool='floating', image_id=None,
                               volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu._make_ng_dict('worker', 42, ['datanode'], 2,
                               floating_ip_pool='floating', image_id=None,
                               volumes_per_node=0, volumes_size=0, id=2)
        cluster = tu._create_cluster("cluster", "tenant1", "general",
                                     "1.2.1", [ng1, ng2],
                                     user_keypair_id='user_key',
                                     neutron_management_network='private_net',
                                     default_image_id='1',
                                     anti_affinity=['datanode'], image_id=None)
        aa_heat_template = h.ClusterTemplate(cluster)
        aa_heat_template.add_node_group_extra(ng1['id'], 1, 'line1\nline2')
        aa_heat_template.add_node_group_extra(ng2['id'], 2, 'line2\nline3')

        # Use the oslo.config override API instead of assigning the
        # attribute directly: a bare ``h.CONF.use_neutron = True`` is never
        # undone and leaks into every test that runs afterwards.  The
        # try/finally mirrors the sibling neutron test in this file.
        h.CONF.set_override("use_neutron", True)
        try:
            main_template = h._load_template(
                'main.heat', {'resources':
                              aa_heat_template._serialize_resources()})

            self.assertEqual(
                json.loads(main_template),
                json.loads(f.get_file_text(
                    "tests/unit/resources/"
                    "test_serialize_resources_aa.heat")))
        finally:
            h.CONF.clear_override("use_neutron")
Exemple #5
0
 def _push_hive_configs(self, cluster, ng_extra, hive_mysql_passwd, r):
     """Upload hive-site.xml and, when MySQL is enabled for the cluster,
     the Hive DB creation script (with the real password substituted)
     to the node via the remote *r*.
     """
     to_write = {
         '/opt/hive/conf/hive-site.xml': ng_extra['xml']['hive-site']}
     if c_helper.is_mysql_enable(cluster):
         raw_sql = f.get_file_text(
             'plugins/vanilla/resources/create_hive_db.sql')
         # The bundled script carries the placeholder password 'pass'.
         to_write['/tmp/create_hive_db.sql'] = raw_sql.replace(
             'pass', hive_mysql_passwd)
     r.write_files_to(to_write)
Exemple #6
0
 def configure_topology(self, topology_str, r):
     """Install the rack-topology hook script and its data file on the
     instance reached through the remote *r*.
     """
     hook = f.get_file_text(
         'plugins/hdp/versions/version_1_3_2/resources/topology.sh')
     r.write_file_to('/etc/hadoop/conf/topology.sh', hook)
     r.execute_command('chmod +x /etc/hadoop/conf/topology.sh',
                       run_as_root=True)
     r.write_file_to('/etc/hadoop/conf/topology.data', topology_str)
Exemple #7
0
    def _push_oozie_configs(self, cluster, ng_extra, r):
        """Write oozie-site.xml and, if MySQL is enabled for the cluster,
        the Oozie DB creation script onto the node via the remote *r*.
        """
        r.write_file_to('/opt/oozie/conf/oozie-site.xml',
                        ng_extra['xml']['oozie-site'])

        if c_helper.is_mysql_enable(cluster):
            script = f.get_file_text(
                'plugins/vanilla/resources/create_oozie_db.sql')
            r.write_files_to({'/tmp/create_oozie_db.sql': script})
Exemple #8
0
 def configure_topology(self, topology_str, r):
     """Deploy the topology resolution script and the topology data file
     onto the instance via the remote *r*.
     """
     hook = f.get_file_text(
         'plugins/hdp/versions/1_3_2/resources/topology.sh')
     r.write_file_to('/etc/hadoop/conf/topology.sh', hook)
     r.execute_command('sudo chmod +x /etc/hadoop/conf/topology.sh')
     r.write_file_to('/etc/hadoop/conf/topology.data', topology_str)
Exemple #9
0
    def _push_oozie_configs(self, cluster, ng_extra, r):
        """Push the Oozie site config to the node via the remote *r*, plus
        the DB creation SQL when the cluster runs with MySQL enabled.
        """
        r.write_file_to('/opt/oozie/conf/oozie-site.xml',
                        ng_extra['xml']['oozie-site'])

        if c_helper.is_mysql_enable(cluster):
            db_script = f.get_file_text(
                'plugins/vanilla/resources/create_oozie_db.sql')
            r.write_files_to({'/tmp/create_oozie_db.sql': db_script})
Exemple #10
0
 def _push_hive_configs(self, cluster, ng_extra, hive_mysql_passwd, r):
     """Push the Hive site config — and, with MySQL enabled, the DB
     creation SQL with *hive_mysql_passwd* substituted in — to the node
     reached through the remote *r*.
     """
     payload = {
         '/opt/hive/conf/hive-site.xml': ng_extra['xml']['hive-site']}
     if c_helper.is_mysql_enable(cluster):
         template_sql = f.get_file_text(
             'plugins/vanilla/resources/create_hive_db.sql')
         # Replace the placeholder password 'pass' with the real one.
         payload['/tmp/create_hive_db.sql'] = template_sql.replace(
             'pass', hive_mysql_passwd)
     r.write_files_to(payload)
Exemple #11
0
    def _push_configs_to_new_node(self, cluster, extra, instance):
        """Push generated configs, SSH keys and the init script to a newly
        provisioned instance and run the setup commands on it over SSH.

        :param cluster: cluster object the instance belongs to
        :param extra: per-node-group dict of generated config payloads
        :param instance: the instance to configure
        """
        ng_extra = extra[instance.node_group.id]
        private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

        # Hadoop XML configs go straight to their final paths; the bare
        # key files land in the login home dir and are moved into place
        # by key_cmd below.
        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script'],
            'id_rsa': private_key,
            'authorized_keys': public_key
        }

        # Single chained command: install the key pair under the hadoop
        # user with ownership and 600 permissions.
        key_cmd = 'sudo mkdir -p /home/hadoop/.ssh/ && ' \
                  'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && ' \
                  'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && ' \
                  'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}'

        with remote.get_remote(instance) as r:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            # chown the config dirs first so write_files_to can create the
            # files as the login user.
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.execute_command(
                'sudo chown -R $USER:$USER /opt/oozie/conf'
            )
            r.write_files_to(files)
            r.execute_command(
                'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
            )
            # Run the init script, capturing all output to a log file.
            r.execute_command(
                'sudo /tmp/savanna-hadoop-init.sh '
                '>> /tmp/savanna-hadoop-init.log 2>&1')

            r.execute_command(key_cmd)

            if c_helper.is_data_locality_enabled(cluster):
                # Topology hook used by Hadoop rack awareness.
                r.write_file_to(
                    '/etc/hadoop/topology.sh',
                    f.get_file_text(
                        'plugins/vanilla/resources/topology.sh'))
                r.execute_command(
                    'sudo chmod +x /etc/hadoop/topology.sh'
                )

            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)
Exemple #12
0
    def _push_configs_to_new_node(self, cluster, extra, instance):
        """Deliver generated Hadoop configs, SSH keys and the init script
        to a freshly started instance and execute the setup steps on it.
        """
        ng_extra = extra[instance.node_group.id]
        private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

        # XML configs and init script go to their final paths; the bare
        # key files are moved into /home/hadoop/.ssh by key_cmd below.
        to_upload = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script'],
            'id_rsa': private_key,
            'authorized_keys': public_key
        }

        key_cmd = ('sudo mkdir -p /home/hadoop/.ssh/ && '
                   'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && '
                   'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && '
                   'sudo chmod 600 /home/hadoop/.ssh/'
                   '{id_rsa,authorized_keys}')

        with remote.get_remote(instance) as r:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            r.execute_command('sudo chown -R $USER:$USER /etc/hadoop')
            r.execute_command('sudo chown -R $USER:$USER /opt/oozie/conf')
            r.write_files_to(to_upload)
            r.execute_command('sudo chmod 0500 /tmp/savanna-hadoop-init.sh')
            r.execute_command('sudo /tmp/savanna-hadoop-init.sh '
                              '>> /tmp/savanna-hadoop-init.log 2>&1')

            r.execute_command(key_cmd)

            if c_helper.is_data_locality_enabled(cluster):
                hook = f.get_file_text(
                    'plugins/vanilla/v1_2_1/resources/topology.sh')
                r.write_file_to('/etc/hadoop/topology.sh', hook)
                r.execute_command('sudo chmod +x /etc/hadoop/topology.sh')

            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)
Exemple #13
0
def _load_template(template_name, fields):
    """Read a template from the resources/ directory and interpolate
    *fields* into it with %-formatting (trailing whitespace stripped).
    """
    raw = f.get_file_text('resources/%s' % template_name)
    return raw.rstrip() % fields
Exemple #14
0
    def _push_configs_to_nodes(self, cluster, instances=None):
        """Distribute generated Hadoop/Oozie/Hive configs to cluster nodes.

        :param cluster: cluster whose configuration is pushed
        :param instances: instances to configure; defaults to all cluster
                          instances when None
        """
        extra = self._extract_configs_to_extra(cluster)

        if instances is None:
            instances = utils.get_instances(cluster)

        # Per-instance configs: XML site files plus the init script.
        for inst in instances:
            ng_extra = extra[inst.node_group.id]
            files = {
                '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
                '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
                '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
                '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script']
            }
            with remote.get_remote(inst) as r:
                # TODO(aignatov): sudo chown is wrong solution. But it works.
                # chown first so the files can be written as the login user.
                r.execute_command(
                    'sudo chown -R $USER:$USER /etc/hadoop'
                )
                r.execute_command(
                    'sudo chown -R $USER:$USER /opt/oozie/conf'
                )
                r.write_files_to(files)
                r.execute_command(
                    'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
                )
                # Run the init script, capturing all output to a log file.
                r.execute_command(
                    'sudo /tmp/savanna-hadoop-init.sh '
                    '>> /tmp/savanna-hadoop-init.log 2>&1')

        nn = utils.get_namenode(cluster)
        jt = utils.get_jobtracker(cluster)

        # Include files listing datanode/tasktracker FQDNs on the masters.
        with remote.get_remote(nn) as r:
            r.write_file_to('/etc/hadoop/dn.incl', utils.
                            generate_fqdn_host_names(
                            utils.get_datanodes(cluster)))
        if jt:
            with remote.get_remote(jt) as r:
                r.write_file_to('/etc/hadoop/tt.incl', utils.
                                generate_fqdn_host_names(
                                utils.get_tasktrackers(cluster)))

        # Oozie node gets its site config; the DB creation SQL only when
        # MySQL is enabled for the cluster.
        oozie = utils.get_oozie(cluster)
        if oozie:
            with remote.get_remote(oozie) as r:
                r.write_file_to('/opt/oozie/conf/oozie-site.xml',
                                extra[oozie.node_group.id]
                                ['xml']['oozie-site'])
            if c_helper.is_mysql_enable(cluster):
                sql_script = f.get_file_text(
                    'plugins/vanilla/resources/create_oozie_db.sql')
                files = {
                    '/tmp/create_oozie_db.sql': sql_script
                }
                remote.get_remote(oozie).write_files_to(files)

        # Hive server gets hive-site.xml, plus the DB creation SQL when
        # MySQL is enabled.  NOTE(review): unlike the per-node push above,
        # the SQL here is uploaded without the password substitution —
        # presumably handled elsewhere; confirm against callers.
        hive_server = utils.get_hiveserver(cluster)
        if hive_server:
            ng_extra = extra[hive_server.node_group.id]
            files = {
                '/opt/hive/conf/hive-site.xml':
                ng_extra['xml']['hive-site']
            }
            if c_helper.is_mysql_enable(cluster):
                sql_script = f.get_file_text(
                    'plugins/vanilla/resources/create_hive_db.sql'
                )
                files.update({'/tmp/create_hive_db.sql': sql_script})
            remote.get_remote(hive_server).write_files_to(files)
Exemple #15
0
def _load_template(template_name, fields):
    """Load the named template from resources/ and fill in *fields* via
    %-interpolation after stripping trailing whitespace.
    """
    text = f.get_file_text('resources/%s' % template_name).rstrip()
    return text % fields