Example #1
    def test__set_ambari_credentials__admin_only(self, client):
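        # With only the default 'admin' user in the spec, credential setup
        # should issue a single PUT that updates the admin password and
        # leave ambari_info authenticated as 'admin'.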
        client.side_effect = self._get_test_request
        self.requests = []
        plugin = ap.AmbariPlugin()

        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_1_3_2/resources/'
            'default-cluster.template')
        cluster_spec = cs.ClusterSpec(cluster_config_file)

        ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', 'admin',
                                    'old-pwd')
        plugin._set_ambari_credentials(cluster_spec, ambari_info, '1.3.2')

        self.assertEqual(1, len(self.requests))
        request = self.requests[0]
        self.assertEqual('put', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/admin',
                         request.url)
        self.assertEqual(
            '{"Users":{"roles":"admin","password":"******",'
            '"old_password":"******"} }', request.data)
        self.assertEqual(('admin', 'old-pwd'), request.auth)
        self.assertEqual('admin', ambari_info.user)
        self.assertEqual('admin', ambari_info.password)
Example #2
    def test_parse_default_with_hosts(self, patched):
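        # Parse the default cluster template against a two-node cluster
        # (master + slave) and verify the resulting services,
        # configurations, and host-to-role mappings.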
        patched.side_effect = test_get_instance_info
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/resources/default-cluster.template')

        servers = []
        server1 = TestServer('host1',
                             'master',
                             '11111',
                             3,
                             '111.11.1111',
                             '222.11.1111',
                             node_processes=[
                                 "namenode", "jobtracker",
                                 "secondary_namenode", "ganglia_server",
                                 "ganglia_monitor", "nagios_server",
                                 "AMBARI_SERVER", "ambari_agent"
                             ])
        server2 = TestServer('host2', 'slave', '11111', 3, '222.22.2222',
                             '333.22.2222')
        servers.append(server1)
        servers.append(server2)

        cluster = TestCluster()
        cluster.instances = servers

        cluster_config = cs.ClusterSpec(cluster_config_file, cluster)

        self._assert_services(cluster_config.services)
        self._assert_configurations(cluster_config.configurations)
        self._assert_host_role_mappings(cluster_config.node_groups)

        return cluster_config
Example #3
    def test__get_ambari_info(self, patched):
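        # get_ambari_info should honor an overridden ambari 'server.port'
        # and fall back to the default 8080 once the property is removed.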
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_1_3_2/resources/'
            'default-cluster.template')

        test_host = base.TestServer('host1', 'test-master', '11111', 3,
                                    '111.11.1111', '222.11.1111')

        node_group = base.TestNodeGroup('ng1', [test_host], [
            "AMBARI_SERVER", "NAMENODE", "DATANODE", "JOBTRACKER",
            "TASKTRACKER"
        ])
        cluster = base.TestCluster([node_group])
        cluster_config = cs.ClusterSpec(cluster_config_file)
        cluster_config.create_operational_config(cluster, [])
        plugin = ap.AmbariPlugin()

        # change port
        cluster_config.configurations['ambari']['server.port'] = '9000'

        ambari_info = plugin.get_ambari_info(cluster_config)
        self.assertEqual('9000', ambari_info.port)

        # remove port
        del cluster_config.configurations['ambari']['server.port']
        ambari_info = plugin.get_ambari_info(cluster_config)

        self.assertEqual('8080', ambari_info.port)
Example #4
    def test__set_ambari_credentials__admin_only(self):
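        # Variant of the admin-only credential test that stubs
        # _get_rest_request directly; the expected admin role string here
        # is 'admin,user'.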
        self.requests = []
        plugin = ap.AmbariPlugin()
        plugin._get_rest_request = self._get_test_request

        with open(
                os.path.join(os.path.realpath('../plugins'), 'hdp',
                             'resources', 'default-cluster.template'),
                'r') as f:
            cluster_spec = cs.ClusterSpec(f.read())

        ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', 'admin',
                                    'old-pwd')
        plugin._set_ambari_credentials(cluster_spec, ambari_info)

        self.assertEqual(1, len(self.requests))
        request = self.requests[0]
        self.assertEqual('put', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/admin',
                         request.url)
        self.assertEqual(
            '{"Users":{"roles":"admin,user","password":"******",'
            '"old_password":"******"} }', request.data)
        self.assertEqual(('admin', 'old-pwd'), request.auth)
        self.assertEqual('admin', ambari_info.user)
        self.assertEqual('admin', ambari_info.password)
Example #5
    def scale_cluster(self, cluster, instances):
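        # Scale an existing cluster: rebuild the cluster spec from the
        # blueprint, provision Ambari on each new instance in its own
        # thread, wait for the hosts to register, then add and start the
        # new hosts and components.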
        processor = self._get_blueprint_processor(cluster)
        cluster_spec = clusterspec.ClusterSpec(
            json.dumps(processor.blueprint), cluster=cluster)
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(h.HadoopServer(instance,
                                          cluster_spec.node_groups
                                          [host_role],
                                          ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec,
                                           self._get_servers(cluster))

        for server in servers:
            self._spawn('Ambari provisioning thread',
                        server.provision_ambari, ambari_info)

        self._wait_for_host_registrations(self._get_num_hosts(cluster),
                                          ambari_info)

        # now add the hosts and the component
        self._add_hosts_and_components(cluster_spec, servers,
                                       ambari_info, cluster.name)

        self._install_and_start_components(cluster.name, servers, ambari_info)
Example #6
    def convert(self, config, plugin_name, version, cluster_template_create):
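        # Convert a raw cluster configuration into a cluster template:
        # normalize the spec, flatten node groups and per-target configs
        # into plain dicts, and persist them via cluster_template_create.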
        normalized_config = clusterspec.ClusterSpec(config).normalize()

        # TODO(jspeidel): can we get the name (first arg) from somewhere?

        node_groups = []
        for ng in normalized_config.node_groups:
            node_group = {
                "name": ng.name,
                "flavor_id": ng.flavor,
                "node_processes": ng.node_processes,
                "count": ng.count
            }
            node_groups.append(node_group)

        cluster_configs = dict()
        for entry in normalized_config.cluster_configs:
            ci = entry.config
            # get the associated service dictionary
            target = entry.config.applicable_target
            service_dict = cluster_configs.get(target, {})
            service_dict[ci.name] = entry.value
            cluster_configs[target] = service_dict

        ctx = context.ctx()
        return cluster_template_create(
            ctx, {
                "name": uuidutils.generate_uuid(),
                "plugin_name": plugin_name,
                "hadoop_version": version,
                "node_groups": node_groups,
                "cluster_configs": cluster_configs
            })
Example #7
def create_clusterspec(hdp_version='1.3.2'):
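    # Load the packaged default-cluster.template for the requested HDP
    # version and wrap it in a ClusterSpec.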
    version_suffix = hdp_version.replace('.', '_')
    cluster_config_file = pkg.resource_string(
        version.version_info.package,
        'plugins/hdp/versions/version_{0}/resources/'
        'default-cluster.template'.format(version_suffix))

    return cs.ClusterSpec(cluster_config_file, version=hdp_version)
Example #8
    def get_cluster_spec(self, cluster, user_inputs,
                         scaled_groups=None, cluster_template=None):
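        # Use the supplied cluster template when given; otherwise start
        # from the default configuration and fold in user inputs and any
        # scaled node groups.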
        if cluster_template:
            cluster_spec = cs.ClusterSpec(cluster_template, '2.0.6')
        else:
            cluster_spec = self.get_default_cluster_configuration()
            cluster_spec.create_operational_config(
                cluster, user_inputs, scaled_groups)

        return cluster_spec
Example #9
    def test_ambari_rpm(self):
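        # The 'ambari' section of the default configuration should declare
        # an rpm URI.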
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/resources/default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file)

        self._assert_configurations(cluster_config.configurations)
        ambari_config = cluster_config.configurations['ambari']
        self.assertIsNotNone(ambari_config.get('rpm', None),
                             'no rpm uri found')
Example #10
    def test_parse_default(self):
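        # Parse the default cluster template without an attached cluster
        # and verify its services, configurations, and host-to-role
        # mappings.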
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/resources/default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file)

        self._assert_services(cluster_config.services)
        self._assert_configurations(cluster_config.configurations)
        self._assert_host_role_mappings(cluster_config.node_groups)

        return cluster_config
Example #11
    def test_convert(self, ctx_func):
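        # Converting the 1.3.2 default template should produce a cluster
        # template whose hadoop version and node group count match the
        # normalized ClusterSpec.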
        plugin = ap.AmbariPlugin()
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/1_3_2/resources/default-cluster.template')
        cluster = plugin.convert(cluster_config_file, 'ambari', '1.3.2',
                                 'test-plugin', create_cluster_template)
        normalized_config = cs.ClusterSpec(cluster_config_file).normalize()

        self.assertEqual(normalized_config.hadoop_version,
                         cluster.hadoop_version)
        self.assertEqual(len(normalized_config.node_groups),
                         len(cluster.node_groups))
Example #12
    def test_update_ambari_info_credentials(self):
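        # _update_ambari_info_credentials should replace the initial
        # credentials with the admin user and password defined in the spec.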
        plugin = ap.AmbariPlugin()

        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/1_3_2/resources/default-cluster.template')
        cluster_spec = cs.ClusterSpec(cluster_config_file)

        ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', 'admin',
                                    'old-pwd')
        plugin._update_ambari_info_credentials(cluster_spec, ambari_info)

        self.assertEqual('admin', ambari_info.user)
        self.assertEqual('admin', ambari_info.password)
Example #13
    def test_convert(self, ctx_func):
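        # Converting the default template (hadoop version 1.3.0) should
        # preserve the hadoop version and node group count of the
        # normalized spec.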
        plugin = ap.AmbariPlugin()
        with open(
                os.path.join(os.path.realpath('../plugins'), 'hdp',
                             'resources', 'default-cluster.template'),
                'r') as f:
            cluster = plugin.convert(f.read(), 'ambari', '1.3.0',
                                     create_cluster_template)
        with open(
                os.path.join(os.path.realpath('../plugins'), 'hdp',
                             'resources', 'default-cluster.template'),
                'r') as f:
            normalized_config = cs.ClusterSpec(f.read()).normalize()

        self.assertEqual(normalized_config.hadoop_version,
                         cluster.hadoop_version)
        self.assertEqual(len(normalized_config.node_groups),
                         len(cluster.node_groups))
Example #14
    def test__get_ambari_info(self):
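        # Variant of the Ambari-info test that passes the host list
        # explicitly: an overridden 'server.port' is honored, and 8080 is
        # used once the property is removed.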
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/resources/default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file)
        plugin = ap.AmbariPlugin()

        # change port
        cluster_config.configurations['ambari']['server.port'] = '9000'
        ambari_info = plugin.get_ambari_info(
            cluster_config, [TestHost('111.11.1111', 'master')])

        self.assertEqual('9000', ambari_info.port)

        # remove port
        del cluster_config.configurations['ambari']['server.port']
        ambari_info = plugin.get_ambari_info(
            cluster_config, [TestHost('111.11.1111', 'master')])

        self.assertEqual('8080', ambari_info.port)
Example #15
    def test__set_ambari_credentials__no_admin_user(self):
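        # When the only AMBARI user is demoted to the 'user' group,
        # credential setup should raise HadoopProvisionError.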
        self.requests = []
        plugin = ap.AmbariPlugin()
        plugin._get_rest_request = self._get_test_request

        with open(
                os.path.join(os.path.realpath('../plugins'), 'hdp',
                             'resources', 'default-cluster.template'),
                'r') as f:
            cluster_spec = cs.ClusterSpec(f.read())

        for service in cluster_spec.services:
            if service.name == 'AMBARI':
                user = service.users[0]
                user.name = 'test'
                user.password = '******'
                user.groups = ['user']

        self.assertRaises(
            ex.HadoopProvisionError,
            plugin._set_ambari_credentials, cluster_spec, '111.11.1111')
Example #16
    def test__set_ambari_credentials__new_user_with_admin(self, client):
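        # Adding a non-admin user alongside the admin account should yield
        # two requests: a PUT updating the admin password, then a POST
        # creating the new user authenticated as the updated admin.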
        self.requests = []
        plugin = ap.AmbariPlugin()
        client.side_effect = self._get_test_request

        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_1_3_2/resources/'
            'default-cluster.template')
        cluster_spec = cs.ClusterSpec(cluster_config_file)

        for service in cluster_spec.services:
            if service.name == 'AMBARI':
                new_user = cs.User('test', 'test_pw', ['user'])
                service.users.append(new_user)

        ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', 'admin',
                                    'old-pwd')
        plugin._set_ambari_credentials(cluster_spec, ambari_info, '1.3.2')
        self.assertEqual(2, len(self.requests))

        request = self.requests[0]
        self.assertEqual('put', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/admin',
                         request.url)
        self.assertEqual(
            '{"Users":{"roles":"admin","password":"******",'
            '"old_password":"******"} }', request.data)
        self.assertEqual(('admin', 'old-pwd'), request.auth)

        request = self.requests[1]
        self.assertEqual('post', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/test',
                         request.url)
        self.assertEqual('{"Users":{"password":"******","roles":"user"} }',
                         request.data)
        self.assertEqual(('admin', 'admin'), request.auth)

        self.assertEqual('admin', ambari_info.user)
        self.assertEqual('admin', ambari_info.password)
Example #17
    def test__set_ambari_credentials__new_user_with_admin(self):
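        # Same new-user-with-admin scenario, but stubbing _get_rest_request
        # directly and expecting the 'admin,user' role string on the PUT.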
        self.requests = []
        plugin = ap.AmbariPlugin()
        plugin._get_rest_request = self._get_test_request

        with open(
                os.path.join(os.path.realpath('../plugins'), 'hdp',
                             'resources', 'default-cluster.template'),
                'r') as f:
            cluster_spec = cs.ClusterSpec(f.read())

        for service in cluster_spec.services:
            if service.name == 'AMBARI':
                new_user = cs.User('test', 'test_pw', ['user'])
                service.users.append(new_user)

        ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', 'admin',
                                    'old-pwd')
        plugin._set_ambari_credentials(cluster_spec, ambari_info)
        self.assertEqual(2, len(self.requests))

        request = self.requests[0]
        self.assertEqual('put', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/admin',
                         request.url)
        self.assertEqual(
            '{"Users":{"roles":"admin,user","password":"******",'
            '"old_password":"******"} }', request.data)
        self.assertEqual(('admin', 'old-pwd'), request.auth)

        request = self.requests[1]
        self.assertEqual('post', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/test',
                         request.url)
        self.assertEqual('{"Users":{"password":"******","roles":"user"} }',
                         request.data)
        self.assertEqual(('admin', 'admin'), request.auth)

        self.assertEqual('admin', ambari_info.user)
        self.assertEqual('admin', ambari_info.password)
Example #18
    def test_select_correct_server_for_ambari_host(self, patched):
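        # The cluster spec built from this cluster should reference
        # 'ambari_machine', the host that carries the AMBARI_SERVER
        # process.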
        patched.side_effect = test_get_instance_info
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/resources/default-cluster.template')

        servers = []
        server1 = TestServer('ambari_machine',
                             'master',
                             '11111',
                             3,
                             '111.11.1111',
                             '222.11.1111',
                             node_processes=[
                                 "namenode", "jobtracker",
                                 "secondary_namenode", "ganglia_server",
                                 "ganglia_monitor", "nagios_server",
                                 "AMBARI_SERVER", "ambari_agent"
                             ])
        server2 = TestServer('host2',
                             'slave',
                             '11111',
                             3,
                             '222.22.2222',
                             '333.22.2222',
                             node_processes=[
                                 "datanode", "tasktracker", "ganglia_monitor",
                                 "hdfs_client", "mapreduce_client",
                                 "ambari_agent"
                             ])
        servers.append(server1)
        servers.append(server2)

        cluster = TestCluster()
        cluster.instances = servers

        cluster_config = cs.ClusterSpec(cluster_config_file, cluster)
        self.assertIn('ambari_machine', cluster_config.str,
                      'Ambari host not found')
Example #19
    def test__set_ambari_credentials__no_admin_user(self, client):
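        # With the only AMBARI user reduced to the 'user' group, credential
        # setup should raise HadoopProvisionError.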
        self.requests = []
        plugin = ap.AmbariPlugin()
        client.side_effect = self._get_test_request

        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/1_3_2/resources/default-cluster.template')
        cluster_spec = cs.ClusterSpec(cluster_config_file)

        for service in cluster_spec.services:
            if service.name == 'AMBARI':
                user = service.users[0]
                user.name = 'test'
                user.password = '******'
                user.groups = ['user']

        ambari_info = ap.AmbariInfo(TestHost('111.11.1111'), '8080', 'admin',
                                    'old-pwd')
        self.assertRaises(
            ex.HadoopProvisionError,
            plugin._set_ambari_credentials, cluster_spec, ambari_info,
            '1.3.2')
Example #20
    def create_cluster(self, cluster, cluster_template):
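        # Provision a new cluster from a cluster template: build the spec,
        # locate the AMBARI_SERVER host, wrap each host in a HadoopServer,
        # provision and install the stack through Ambari, then record the
        # service URLs; raise HadoopProvisionError on failure.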

        if cluster_template is None:
            raise ValueError('must supply cluster template')

        cluster_spec = clusterspec.ClusterSpec(cluster_template,
                                               cluster=cluster)

        hosts = self._get_servers(cluster)

        ambari_host = self._determine_host_for_server_component(
            'AMBARI_SERVER', cluster_spec, hosts)
        self.cluster_name_to_ambari_host_mapping[cluster.name] = ambari_host
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        provisioned = self._provision_cluster(cluster.name, cluster_spec,
                                              ambari_host, servers)
        if provisioned:
            installed = self._install_services(cluster.name, ambari_host)
            if installed:
                LOG.info("Install of Hadoop stack successful.")
                # add service urls
                self._set_cluster_info(cluster, cluster_spec, hosts)
            else:
                raise ex.HadoopProvisionError(
                    'Installation of Hadoop stack failed.')

        else:
            raise ex.HadoopProvisionError(
                'Provisioning of Hadoop cluster failed.')
Example #21
    def test__set_ambari_credentials__new_user_no_admin(self):
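        # When the stock admin user is replaced by a different
        # admin-capable user, setup should create that user with a POST,
        # delete the original 'admin' account, and switch the plugin
        # credentials to the new user.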
        self.requests = []
        plugin = ap.AmbariPlugin()
        plugin._get_rest_request = self._get_test_request

        with open(
                os.path.join(os.path.realpath('../plugins'), 'hdp',
                             'resources', 'default-cluster.template'),
                'r') as f:
            cluster_spec = cs.ClusterSpec(f.read())

        for service in cluster_spec.services:
            if service.name == 'AMBARI':
                user = service.users[0]
                user.name = 'test'
                user.password = '******'

        plugin._set_ambari_credentials(cluster_spec, '111.11.1111')
        self.assertEqual(2, len(self.requests))

        request = self.requests[0]
        self.assertEqual('post', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/test',
                         request.url)
        self.assertEqual(
            '{"Users":{"password":"******","roles":"admin,user"'
            '} }', request.data)
        self.assertEqual(('admin', 'admin'), request.auth)

        request = self.requests[1]
        self.assertEqual('delete', request.method)
        self.assertEqual('http://111.11.1111:8080/api/v1/users/admin',
                         request.url)
        self.assertIsNone(request.data)
        self.assertEqual(('test', 'test_pw'), request.auth)
        self.assertEqual('test', plugin.ambari_user)
        self.assertEqual('test_pw', plugin.ambari_password)
Example #22
    def get_default_cluster_configuration(self):
        return cs.ClusterSpec(self._get_default_cluster_template(), '2.0.6')
Example #23
    def _get_default_cluster_configuration(self):
        with open(
                os.path.join(os.path.dirname(__file__), 'resources',
                             'default-cluster.template'), 'r') as f:
            return clusterspec.ClusterSpec(f.read())
Example #24
def create_clusterspec():
    cluster_config_file = pkg.resource_string(
        version.version_info.package,
        'plugins/hdp/versions/1_3_2/resources/default-cluster.template')

    return cs.ClusterSpec(cluster_config_file)
Example #25
    def test_normalize(self):
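        # normalize() should expose the template's properties as typed
        # config entries with targets and defaults, and produce master and
        # slave node groups carrying the expected component lists.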
        cluster_config_file = pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/resources/default-cluster.template')

        cluster_config = cs.ClusterSpec(cluster_config_file)
        cluster = cluster_config.normalize()

        configs = cluster.cluster_configs
        contains_dfs_datanode_http_address = False
        contains_mapred_jobtracker_taskScheduler = False
        contains_dfs_include = False

        for entry in configs:
            config = entry.config
            # assert some random configurations across targets
            if config.name == 'dfs.datanode.http.address':
                contains_dfs_datanode_http_address = True
                self.assertEqual('string', config.type)
                self.assertEqual('0.0.0.0:50075', config.default_value)
                self.assertEqual('HDFS', config.applicable_target)

            if config.name == 'mapred.jobtracker.taskScheduler':
                contains_mapred_jobtracker_taskScheduler = True
                self.assertEqual('string', config.type)
                self.assertEqual(
                    'org.apache.hadoop.mapred.CapacityTaskScheduler',
                    config.default_value)
                self.assertEqual('MAPREDUCE', config.applicable_target)

            if config.name == 'dfs_include':
                contains_dfs_include = True
                self.assertEqual('string', config.type)
                self.assertEqual('dfs.include', config.default_value)
                self.assertEqual('HDFS', config.applicable_target)

                # print('Config: name: {0}, type: {1}, default value: {2}, '
                #       'target: {3}, value: {4}'.format(
                #           config.name, config.type, config.default_value,
                #           config.applicable_target, entry.value))

        self.assertTrue(contains_dfs_datanode_http_address)
        self.assertTrue(contains_mapred_jobtracker_taskScheduler)
        self.assertTrue(contains_dfs_include)
        node_groups = cluster.node_groups
        self.assertEqual(2, len(node_groups))
        contains_master_group = False
        contains_slave_group = False
        for i in range(2):
            node_group = node_groups[i]
            components = node_group.node_processes
            if node_group.name == "master":
                contains_master_group = True
                self.assertEqual(8, len(components))
                self.assertIn('NAMENODE', components)
                self.assertIn('JOBTRACKER', components)
                self.assertIn('SECONDARY_NAMENODE', components)
                self.assertIn('GANGLIA_SERVER', components)
                self.assertIn('GANGLIA_MONITOR', components)
                self.assertIn('NAGIOS_SERVER', components)
                self.assertIn('AMBARI_SERVER', components)
                self.assertIn('AMBARI_AGENT', components)
                # TODO(jspeidel): node configs
                # TODO(jspeidel): vm_requirements
            elif node_group.name == 'slave':
                contains_slave_group = True
                self.assertEqual(6, len(components))
                self.assertIn('DATANODE', components)
                self.assertIn('TASKTRACKER', components)
                self.assertIn('GANGLIA_MONITOR', components)
                self.assertIn('HDFS_CLIENT', components)
                self.assertIn('MAPREDUCE_CLIENT', components)
                self.assertIn('AMBARI_AGENT', components)
                # TODO(jspeidel): node configs
                # TODO(jspeidel): vm requirements
            else:
                self.fail('Unexpected node group: {0}'.format(node_group.name))
        self.assertTrue(contains_master_group)
        self.assertTrue(contains_slave_group)