Example #1
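Checks that a failed REST call (HTTP 500) from the manager raises IntelPluginException.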
    def test_rest_client(self, get):
        client = c.IntelClient(self._get_instance(), 'rty')
        get.return_value = r.make_resp(ok=False,
                                       status_code=500,
                                       data={"message": "message"})
        self.assertRaises(iex.IntelPluginException,
                          client.services.get_services)
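These snippets appear to come from the OpenStack Sahara Intel plugin and its test suite. The test methods rely on scaffolding the listing omits: the get/post/delete arguments are mocks injected by mock.patch decorators, and r.make_resp builds a fake HTTP response. A minimal sketch of what that helper might look like follows (the Response class and the patch targets are assumptions; only make_resp's signature is inferred from the calls above):

import json


class Response(object):
    # Mimics the subset of a requests.Response object that the client reads.
    def __init__(self, ok, status_code, data):
        self.ok = ok
        self.status_code = status_code
        self.text = json.dumps(data or {})

    def json(self):
        return json.loads(self.text)


def make_resp(data=None, ok=True, status_code=200):
    return Response(ok, status_code, data)


# Decorators apply bottom-up, so the patch closest to the def becomes the
# first mock argument after self, e.g.:
#
#     @mock.patch('requests.post')  # illustrative patch target
#     @mock.patch('requests.get')
#     def test_session_op(self, get, post):
#         ...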
Example #2
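Scales a cluster: registers the new hosts with the manager, installs software on them, adds them as DataNodes and/or TaskTrackers, and restarts the affected services.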
def scale_cluster(cluster, instances):
    scale_ins_hosts = [i.fqdn() for i in instances]
    dn_hosts = [dn.fqdn() for dn in u.get_datanodes(cluster)]
    tt_hosts = [tt.fqdn() for tt in u.get_tasktrackers(cluster)]
    to_scale_dn = []
    to_scale_tt = []
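
    # Split the new hosts by the role each will take on.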
    for i in scale_ins_hosts:
        if i in dn_hosts:
            to_scale_dn.append(i)

        if i in tt_hosts:
            to_scale_tt.append(i)

    client = c.IntelClient(u.get_instance(cluster, 'manager'), cluster.name)
    rack = '/Default'
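    # Register the new hosts with the manager and install Hadoop on them.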
    client.nodes.add(scale_ins_hosts, rack, 'hadoop',
                     '/home/hadoop/.ssh/id_rsa')
    client.cluster.install_software(scale_ins_hosts)

    if to_scale_tt:
        client.services.mapred.add_nodes('TaskTracker', to_scale_tt)

    if to_scale_dn:
        client.services.hdfs.add_nodes('DataNode', to_scale_dn)

    client.nodes.config()

    if to_scale_dn:
        client.services.hdfs.start()

    if to_scale_tt:
        client.services.mapred.start()
Example #3
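Installs a new cluster end to end: create, register all hosts, install software, configure services, push provisioning configs, and format HDFS.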
def install_cluster(cluster):
    mng_instance = u.get_instance(cluster, 'manager')

    all_hosts = list(set([i.fqdn() for i in u.get_instances(cluster)]))

    client = c.IntelClient(mng_instance, cluster.name)

    LOG.info("Create cluster")
    client.cluster.create()

    LOG.info("Add nodes to cluster")
    rack = '/Default'
    client.nodes.add(all_hosts, rack, 'hadoop', '/home/hadoop/.ssh/id_rsa')

    LOG.info("Install software")
    client.cluster.install_software(all_hosts)

    LOG.info("Configure services")
    _configure_services(client, cluster)

    LOG.info("Deploy cluster")
    client.nodes.config(force=True)

    LOG.info("Provisioning configs")
    # cinder and ephemeral drive support
    _configure_storage(client, cluster)
    # swift support
    _configure_swift(client, cluster)
    # user configs
    _add_user_params(client, cluster)

    LOG.info("Format HDFS")
    client.services.hdfs.format()
Example #4
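Exercises the session-polling logic: an asynchronous operation (HDFS format) is started with a POST and polled with GETs until the _ALLFINISH marker appears.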
    def test_session_op(self, get, post):
        client = c.IntelClient(self._get_instance(), 'rty')

        data1 = {
            "items": [{
                "nodeprogress": {
                    "hostname": 'host',
                    'info': 'info\n'
                }
            }]
        }
        data2 = {
            "items": [{
                "nodeprogress": {
                    "hostname": 'host',
                    'info': '_ALLFINISH\n'
                }
            }]
        }

        get.side_effect = (r.make_resp(data1), r.make_resp(data2))
        post.return_value = r.make_resp(SESSION_POST_DATA)

        client.services.hdfs.format()

        self.assertEqual(get.call_count, 2)
        self.assertEqual(post.call_count, 1)
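SESSION_POST_DATA and SESSION_GET_DATA, referenced here and in later examples, are module-level fixtures the listing omits. Judging from their use (a POST starts an asynchronous session, then GETs poll it until the _ALLFINISH marker appears), plausible shapes are the following sketch (assumed, inferred from usage, not taken from the source):

# Hypothetical session fixtures:
SESSION_POST_DATA = {"sessionID": "123"}
SESSION_GET_DATA = {
    "items": [{
        "nodeprogress": {
            "hostname": "host",
            "info": "_ALLFINISH\n"
        }
    }]
}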
Example #5
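Covers HDFS service operations: format, decommission, and DataNode status queries, including the error path for an unknown node.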
    def test_hdfs_services_op(self, get, post):
        client = c.IntelClient(self._get_instance(), 'rty')

        # format
        get.return_value = r.make_resp(SESSION_GET_DATA)
        post.return_value = r.make_resp(SESSION_POST_DATA)
        client.services.hdfs.format()

        # decommission
        post.return_value = r.make_resp()
        client.services.hdfs.decommission_nodes(['n1'])

        # get status
        get.return_value = r.make_resp(
            data={"items": [{
                "hostname": "n1",
                "status": "start"
            }]})
        client.services.hdfs.get_datanodes_status()
        self.assertEqual(client.services.hdfs.get_datanode_status('n1'),
                         'start')
        self.assertRaises(iex.IntelPluginException,
                          client.services.hdfs.get_datanode_status, 'n2')

        self.assertEqual(get.call_count, 4)
        self.assertEqual(post.call_count, 2)
Example #6
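Covers parameter operations on the hdfs, hadoop, and mapred groups: add and update succeed, while get raises NotImplementedException.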
    def test_params_op(self, post, put):
        client = c.IntelClient(self._get_instance(), 'rty')
        post.return_value = r.make_resp()
        put.return_value = r.make_resp()

        # add
        client.params.hdfs.add('lelik', 'bolik')
        client.params.hadoop.add('lelik', 'bolik')
        client.params.mapred.add('lelik', 'bolik')

        # get
        self.assertRaises(ex.NotImplementedException, client.params.hdfs.get,
                          ['n1'], 'lelik')
        self.assertRaises(ex.NotImplementedException, client.params.hadoop.get,
                          ['n1'], 'lelik')
        self.assertRaises(ex.NotImplementedException, client.params.mapred.get,
                          ['n1'], 'lelik')

        # update
        client.params.hdfs.update('lelik', 'bolik', nodes=['n1'])
        client.params.hdfs.update('lelik', 'bolik')
        client.params.hadoop.update('lelik', 'bolik', nodes=['n1'])
        client.params.hadoop.update('lelik', 'bolik')
        client.params.mapred.update('lelik', 'bolik', nodes=['n1'])
        client.params.mapred.update('lelik', 'bolik')

        self.assertEqual(post.call_count, 3)
        self.assertEqual(put.call_count, 6)
Example #7
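Covers adding, listing, and deleting services.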
    def test_services_op(self, get, post, delete):
        client = c.IntelClient(self._get_instance(), 'rty')

        # add
        post.return_value = r.make_resp()
        client.services.add(['hdfs', 'mapred'])

        # get_services
        get.return_value = r.make_resp()
        client.services.get_services()

        # delete_service
        delete.return_value = r.make_resp()
        client.services.delete_service('hdfs')
Example #8
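Covers node operations: add (both success and a failed connection), config, delete, get, get_status, and stop.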
    def test_nodes_op(self, get, post, delete):
        client = c.IntelClient(self._get_instance(), 'rty')

        # add
        post.return_value = r.make_resp(
            data={
                "items": [{
                    "iporhostname": "n1",
                    "info": "Connected"
                }, {
                    "iporhostname": "n2",
                    "info": "Connected"
                }]
            })
        client.nodes.add(['n1', 'n2'], '/Def', 'hadoop', '/tmp/key')
        post.return_value = r.make_resp(
            data={"items": [{
                "iporhostname": "n1",
                "info": "bla-bla"
            }]})
        self.assertRaises(iex.IntelPluginException, client.nodes.add, ['n1'],
                          '/Def', 'hadoop', '/tmp/key')

        # config
        post.return_value = r.make_resp(SESSION_POST_DATA)
        get.return_value = r.make_resp(SESSION_GET_DATA)
        client.nodes.config()

        # delete
        delete.return_value = r.make_resp()
        client.nodes.delete(['n1'])

        # get
        get.return_value = r.make_resp()
        client.nodes.get()

        # get_status
        get.return_value = r.make_resp(data={"status": "running"})
        client.nodes.get_status(['n1'])

        # stop_nodes
        post.return_value = r.make_resp()
        client.nodes.stop(['n1'])

        self.assertEqual(delete.call_count, 1)
        self.assertEqual(post.call_count, 4)
        self.assertEqual(get.call_count, 3)
Example #9
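Starts cluster services: HDFS always, plus MapReduce, Hive, and Oozie when the corresponding processes are deployed.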
def start_cluster(cluster):
    client = c.IntelClient(u.get_instance(cluster, 'manager'), cluster.name)

    LOG.debug("Starting hadoop services")
    client.services.hdfs.start()

    if u.get_jobtracker(cluster):
        client.services.mapred.start()

    if u.get_hiveserver(cluster):
        client.services.hive.start()

    if u.get_oozie(cluster):
        LOG.info("Setup oozie")
        _setup_oozie(cluster)

        client.services.oozie.start()
Example #10
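Covers cluster-level operations: create, get, and install_software.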
    def test_cluster_op(self, get, post):
        client = c.IntelClient(self._get_instance(), 'rty')

        data = {'lelik': 'bolik'}

        post.return_value = r.make_resp(data)
        self.assertEqual(client.cluster.create(), data)

        get.return_value = r.make_resp(data)
        self.assertEqual(client.cluster.get(), data)

        post.return_value = r.make_resp(SESSION_POST_DATA)
        get.return_value = r.make_resp(SESSION_GET_DATA)
        client.cluster.install_software(['bla-bla'])

        self.assertEqual(post.call_count, 2)
        self.assertEqual(get.call_count, 2)
Example #11
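Decommissions DataNodes, waits (with timeouts) for decommissioning and for services to stop on the removed hosts, then stops and deletes the nodes.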
def decommission_nodes(cluster, instances):
    dec_hosts = [i.fqdn() for i in instances]
    dn_hosts = [dn.fqdn() for dn in u.get_datanodes(cluster)]
    tt_hosts = [tt.fqdn() for tt in u.get_tasktrackers(cluster)]

    client = c.IntelClient(u.get_instance(cluster, 'manager'), cluster.name)

    dec_dn_hosts = []
    for dec_host in dec_hosts:
        if dec_host in dn_hosts:
            dec_dn_hosts.append(dec_host)

    if dec_dn_hosts:
        client.services.hdfs.decommission_nodes(dec_dn_hosts)

        #TODO(alazarev) make timeout configurable (bug #1262897)
        timeout = 14400  # 4 hours
        cur_time = 0
        for host in dec_dn_hosts:
            while cur_time < timeout:
                # 'Decomissioned' (sic) is compared verbatim against the
                # status string reported by the manager API.
                if client.services.hdfs.get_datanode_status(
                        host) == 'Decomissioned':
                    break
                context.sleep(5)
                cur_time += 5
            else:
                # while/else: reached only when the loop timed out
                # without hitting 'break'.
                LOG.warn("Failed to decommission node '%s' of cluster '%s' "
                         "in %s minutes" % (host, cluster.name, timeout / 60))

    client.nodes.stop(dec_hosts)

    # wait for the services to stop
    #TODO(alazarev) make timeout configurable (bug #1262897)
    timeout = 600  # 10 minutes
    cur_time = 0
    for instance in instances:
        while cur_time < timeout:
            stopped = True
            if instance.fqdn() in dn_hosts:
                code, out = instance.remote().execute_command(
                    'sudo /sbin/service hadoop-datanode status',
                    raise_when_error=False)
                if out.strip() != 'datanode is stopped':
                    stopped = False
                if out.strip() == 'datanode dead but pid file exists':
                    instance.remote().execute_command(
                        'sudo rm -f '
                        '/var/run/hadoop/hadoop-hadoop-datanode.pid')
            if instance.fqdn() in tt_hosts:
                code, out = instance.remote().execute_command(
                    'sudo /sbin/service hadoop-tasktracker status',
                    raise_when_error=False)
                if out.strip() != 'tasktracker is stopped':
                    stopped = False
            if stopped:
                break
            else:
                context.sleep(5)
                cur_time += 5
        else:
            LOG.warn("Failed to stop services on node '%s' of cluster '%s' "
                     "in %s minutes" % (instance, cluster.name, timeout / 60))

    for node in dec_hosts:
        LOG.info("Deleting node '%s' on cluster '%s'" % (node, cluster.name))
        # nodes.delete expects a list of hostnames (cf. Example #8).
        client.nodes.delete([node])
Example #12
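Covers the base service operations shared by hdfs and mapred: start (success and failure), stop, status, get_nodes, and add_nodes.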
    def test_base_services_op(self, get, post):
        client = c.IntelClient(self._get_instance(), 'rty')

        # start
        post.return_value = r.make_resp()
        get.return_value = r.make_resp(
            data={
                "items": [{
                    "serviceName": "hdfs",
                    "status": "running"
                }, {
                    "serviceName": "mapred",
                    "status": "running"
                }]
            })
        client.services.hdfs.start()
        client.services.mapred.start()

        get.return_value = r.make_resp(
            data={
                "items": [{
                    "serviceName": "hdfs",
                    "status": "stopped"
                }, {
                    "serviceName": "mapred",
                    "status": "stopped"
                }]
            })

        self.assertRaises(iex.IntelPluginException, client.services.hdfs.start)
        self.assertRaises(iex.IntelPluginException,
                          client.services.mapred.start)

        # stop
        post.return_value = r.make_resp()
        client.services.hdfs.stop()
        client.services.mapred.stop()

        # service
        get.return_value = r.make_resp(
            data={"items": [{
                "serviceName": "bla-bla",
                "status": "fail"
            }]})

        self.assertRaises(iex.IntelPluginException,
                          client.services.hdfs.status)
        self.assertRaises(iex.IntelPluginException,
                          client.services.mapred.status)

        # get_nodes
        get.return_value = r.make_resp()
        client.services.hdfs.get_nodes()
        client.services.mapred.get_nodes()

        # add_nodes
        post.return_value = r.make_resp()
        client.services.hdfs.add_nodes('DataNode', ['n1', 'n2'])
        client.services.mapred.add_nodes('TaskTracker', ['n1', 'n2'])

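        # start() polls the service status in a loop; while the mocked
        # status stays "stopped" it keeps polling until an internal
        # timeout, which accounts for the large GET count below.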
        self.assertEqual(get.call_count, 606)
        self.assertEqual(post.call_count, 8)