def test_install_twice(self):
        """Installing over an existing installation reports expected errors.

        The first install must succeed with per-host success output; the
        second install's output must match the regexes in install_twice.txt,
        and every host must remain installed with default config/catalog.
        """
        installer = StandalonePrestoInstaller(self)
        self.upload_topology()
        cmd_output = installer.install()
        expected = self.format_err_msgs_with_internal_hosts(
            installed_all_hosts_output)

        actual = cmd_output.splitlines()
        self.assertRegexpMatchesLineByLine(actual, expected)

        for container in self.cluster.all_hosts():
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_catalog(container)

        # Second install is expected to fail; pa_raise_error=False captures
        # the error output instead of raising.
        output = installer.install(pa_raise_error=False)

        self.default_keywords.update(installer.get_keywords())

        with open(os.path.join(LOCAL_RESOURCES_DIR, 'install_twice.txt'),
                  'r') as f:
            expected = f.read()
        expected = self.escape_for_regex(self.replace_keywords(expected))

        self.assertRegexpMatchesLineByLine(output.splitlines(),
                                           expected.splitlines())
        for container in self.cluster.all_hosts():
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_catalog(container)
# --- Example 2 (scraped example separator; score 0) ---
    def test_catalog_add_lost_host(self):
        """`catalog add` succeeds on reachable hosts when one slave is down.

        With slave[0] stopped, `catalog add tpch` must still report the
        deploy message for every host, plus the down-node connection error,
        and the catalog must be present/loaded once servers restart.
        """
        installer = StandalonePrestoInstaller(self)
        self.setup_cluster(NoHadoopBareImageProvider, STANDALONE_PA_CLUSTER)
        self.upload_topology()
        installer.install()
        self.run_prestoadmin('catalog remove tpch')

        self.cluster.stop_host(self.cluster.slaves[0])
        self.cluster.write_content_to_host(
            'connector.name=tpch',
            os.path.join(get_catalog_directory(), 'tpch.properties'),
            self.cluster.master)
        output = self.run_prestoadmin('catalog add tpch', raise_error=False)
        # The message template is loop-invariant; build it once, not per host.
        deploying_message = 'Deploying tpch.properties catalog configurations on: %s'
        for host in self.cluster.all_internal_hosts():
            self.assertTrue(
                deploying_message % host in output,
                'expected %s \n actual %s' %
                (deploying_message % host, output))
        self.assertRegexpMatches(
            output,
            self.down_node_connection_error(self.cluster.internal_slaves[0]))
        # One output line per host plus the down-node error lines.
        self.assertEqual(
            len(output.splitlines()),
            len(self.cluster.all_hosts()) + self.len_down_node_error)
        self.run_prestoadmin('server start', raise_error=False)

        for host in [
                self.cluster.master, self.cluster.slaves[1],
                self.cluster.slaves[2]
        ]:
            self.assert_has_default_catalog(host)
        self._assert_catalogs_loaded([['system'], ['tpch']])
    def test_install_twice(self):
        """Installing twice: second attempt's errors match install_twice.txt.

        Cluster must remain fully installed with default config/connector
        after both attempts.
        """
        installer = StandalonePrestoInstaller(self)
        self.upload_topology()
        cmd_output = installer.install()
        expected = self.format_err_msgs_with_internal_hosts(installed_all_hosts_output)

        actual = cmd_output.splitlines()
        self.assertRegexpMatchesLineByLine(actual, expected)

        for container in self.cluster.all_hosts():
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)

        # Second install is expected to fail; capture output instead of raising.
        output = installer.install(pa_raise_error=False)

        self.default_keywords.update(installer.get_keywords())

        with open(os.path.join(LOCAL_RESOURCES_DIR, 'install_twice.txt'),
                  'r') as f:
            expected = f.read()
        expected = self.escape_for_regex(
            self.replace_keywords(expected))

        self.assertRegexpMatchesLineByLine(output.splitlines(),
                                           expected.splitlines())
        for container in self.cluster.all_hosts():
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
# --- Example 4 (scraped example separator; score 0) ---
    def test_catalog_add_lost_host(self):
        """`catalog add` still deploys everywhere reachable with one slave down."""
        installer = StandalonePrestoInstaller(self)
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        self.upload_topology()
        installer.install()
        self.run_prestoadmin('catalog remove tpch')

        self.cluster.stop_host(
            self.cluster.slaves[0])
        self.cluster.write_content_to_host(
            'connector.name=tpch',
            os.path.join(get_catalog_directory(), 'tpch.properties'),
            self.cluster.master
        )
        output = self.run_prestoadmin('catalog add tpch', raise_error=False)
        # Loop-invariant template; build it once instead of per iteration.
        deploying_message = 'Deploying tpch.properties catalog configurations on: %s'
        for host in self.cluster.all_internal_hosts():
            self.assertTrue(deploying_message % host in output,
                            'expected %s \n actual %s'
                            % (deploying_message % host, output))
        self.assertRegexpMatches(
            output,
            self.down_node_connection_error(self.cluster.internal_slaves[0])
        )
        # One output line per host plus the down-node error lines.
        self.assertEqual(len(output.splitlines()),
                         len(self.cluster.all_hosts()) +
                         self.len_down_node_error)
        self.run_prestoadmin('server start', raise_error=False)

        for host in [self.cluster.master,
                     self.cluster.slaves[1],
                     self.cluster.slaves[2]]:
            self.assert_has_default_catalog(host)
        self._assert_catalogs_loaded([['system'], ['tpch']])
# --- Example 5 (scraped example separator; score 0) ---
    def test_connector_add_lost_host(self):
        """`connector add` still deploys everywhere reachable with one slave down."""
        installer = StandalonePrestoInstaller(self)
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.upload_topology()
        installer.install()
        self.run_prestoadmin('connector remove tpch')

        self.cluster.stop_host(
            self.cluster.slaves[0])
        self.cluster.write_content_to_host(
            'connector.name=tpch',
            os.path.join(constants.CONNECTORS_DIR, 'tpch.properties'),
            self.cluster.master
        )
        output = self.run_prestoadmin('connector add tpch')
        # Loop-invariant template; build it once instead of per iteration.
        deploying_message = 'Deploying tpch.properties connector ' \
                            'configurations on: %s'
        for host in self.cluster.all_internal_hosts():
            self.assertTrue(deploying_message % host in output,
                            'expected %s \n actual %s'
                            % (deploying_message % host, output))
        self.assertRegexpMatches(
            output,
            self.down_node_connection_error(self.cluster.internal_slaves[0])
        )
        # One output line per host plus the down-node error lines.
        self.assertEqual(len(output.splitlines()),
                         len(self.cluster.all_hosts()) +
                         self.len_down_node_error)
        self.run_prestoadmin('server start')

        for host in [self.cluster.master,
                     self.cluster.slaves[1],
                     self.cluster.slaves[2]]:
            self.assert_has_default_connector(host)
        self._assert_connectors_loaded([['system'], ['tpch']])
# --- Example 6 (scraped example separator; score 0) ---
 def test_system_info_pa_separate_node(self):
     """Basic system info is collected when PA runs on a non-Presto node."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(NoHadoopBareImageProvider(), self.PA_ONLY_CLUSTER)
     separate_node_topology = {
         "coordinator": "slave1",
         "workers": ["slave2", "slave3"],
     }
     self.upload_topology(topology=separate_node_topology)
     installer.install(coordinator='slave1')
     self.test_basic_system_info(coordinator=self.cluster.internal_slaves[0],
                                 hosts=self.cluster.slaves)
# --- Example 7 (scraped example separator; score 0) ---
 def test_system_info_pa_separate_node(self):
     """Basic system info is collected when PA runs on a non-Presto node."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(NoHadoopBareImageProvider(), self.PA_ONLY_CLUSTER)
     topology_config = {
         "coordinator": "slave1",
         "workers": ["slave2", "slave3"],
     }
     self.upload_topology(topology=topology_config)
     installer.install(coordinator='slave1')
     self.test_basic_system_info(coordinator=self.cluster.internal_slaves[0],
                                 hosts=self.cluster.slaves)
# --- Example 8 (scraped example separator; score 0) ---
 def test_start_stop_restart_worker_down(self):
     """Start/stop/restart behave correctly while one worker host is down."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(NoHadoopBareImageProvider(), self.PA_ONLY_CLUSTER)
     cluster_topology = {
         "coordinator": "slave1",
         "workers": ["master", "slave2", "slave3"],
     }
     self.upload_topology(topology=cluster_topology)
     installer.install(coordinator='slave1')
     self.assert_start_stop_restart_down_node(self.cluster.slaves[0],
                                              self.cluster.internal_slaves[0])
# --- Example 9 (scraped example separator; score 0) ---
 def test_start_coordinator_down(self):
     """`server start` handles an unreachable coordinator host."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(NoHadoopBareImageProvider, STANDALONE_PA_CLUSTER)
     coordinator_down_topology = {
         "coordinator": "slave1",
         "workers": ["master", "slave2", "slave3"],
     }
     self.upload_topology(topology=coordinator_down_topology)
     installer.install(coordinator='slave1')
     self.assert_start_coordinator_down(self.cluster.slaves[0],
                                        self.cluster.internal_slaves[0])
# --- Example 10 (scraped example separator; score 0) ---
 def test_start_stop_restart_worker_down(self):
     """Start/stop/restart behave correctly while one worker host is down."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(self.PA_ONLY_CLUSTER)
     worker_down_topology = {"coordinator": "slave1",
                             "workers": ["master", "slave2", "slave3"]}
     self.upload_topology(topology=worker_down_topology)
     installer.install()
     self.assert_start_stop_restart_down_node(self.cluster.slaves[0],
                                              self.cluster.internal_slaves[0])
# --- Example 11 (scraped example separator; score 0) ---
 def test_system_info_pa_separate_node(self):
     """`collect system_info` succeeds when PA runs off the Presto nodes."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
     off_node_topology = {
         "coordinator": "slave1",
         "workers": ["slave2", "slave3"],
     }
     self.upload_topology(topology=off_node_topology)
     installer.install(coordinator='slave1')
     self.run_prestoadmin('server start')
     collected = self.run_prestoadmin('collect system_info')
     self._test_basic_system_info(collected,
                                  coordinator=self.cluster.internal_slaves[0],
                                  hosts=self.cluster.slaves)
# --- Example 12 (scraped example separator; score 0) ---
 def test_system_info_pa_separate_node(self):
     """`collect system_info` succeeds when PA runs off the Presto nodes."""
     installer = StandalonePrestoInstaller(self)
     self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
     pa_off_node_topology = {
         "coordinator": "slave1",
         "workers": ["slave2", "slave3"],
     }
     self.upload_topology(topology=pa_off_node_topology)
     installer.install(coordinator='slave1')
     self.run_prestoadmin('server start')
     system_info_out = self.run_prestoadmin('collect system_info')
     self._test_basic_system_info(system_info_out,
                                  coordinator=self.cluster.internal_slaves[0],
                                  hosts=self.cluster.slaves)
    def test_install_when_catalog_json_exists(self):
        """Install deploys a pre-existing jmx catalog alongside the defaults."""
        installer = StandalonePrestoInstaller(self)
        topology = {"coordinator": "master", "workers": ["slave1"]}
        self.upload_topology(topology)
        # Pre-seed a jmx catalog config on the master before installing.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(get_catalog_directory(), 'jmx.properties'),
            self.cluster.master)

        cmd_output = installer.install()
        # Expected regexes, matched line by line against the install output.
        expected = [
            'Deploying rpm on master...', 'Deploying rpm on slave1...',
            'Package deployed successfully on: slave1',
            'Package installed successfully on: slave1',
            'Package deployed successfully on: master',
            'Package installed successfully on: master',
            'Deploying configuration on: master',
            'Deploying jmx.properties, tpch.properties '
            'catalog configurations on: master ',
            'Deploying configuration on: slave1',
            'Deploying jmx.properties, tpch.properties '
            'catalog configurations on: slave1 ',
            'Using rpm_specifier as a local path',
            'Fetching local presto rpm at path: .*',
            'Found existing rpm at: .*'
        ]

        actual = cmd_output.splitlines()
        self.assertRegexpMatchesLineByLine(actual, expected)

        for container in [self.cluster.master, self.cluster.slaves[0]]:
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_catalog(container)
            self.assert_has_jmx_catalog(container)
# --- Example 14 (scraped example separator; score 0) ---
    def test_connection_to_coord_lost(self):
        """Install proceeds on reachable workers when the coordinator is down."""
        installer = StandalonePrestoInstaller(self)
        down_node = self.cluster.internal_slaves[0]
        topology = {"coordinator": down_node,
                    "workers": [self.cluster.internal_master,
                                self.cluster.internal_slaves[1],
                                self.cluster.internal_slaves[2]]}
        self.upload_topology(topology=topology)
        # Take the designated coordinator offline before installing.
        self.cluster.stop_host(
            self.cluster.slaves[0])

        actual_out = installer.install(
            coordinator=down_node, pa_raise_error=False)

        self.assertRegexpMatches(
            actual_out,
            self.down_node_connection_error(down_node)
        )

        # Reachable hosts should still receive worker configs for slave1.
        for host in [self.cluster.master,
                     self.cluster.slaves[1],
                     self.cluster.slaves[2]]:
            self.assert_common_configs(host)
            self.assert_file_content(
                host,
                '/etc/presto/config.properties',
                self.default_workers_config_with_slave1_
            )
# --- Example 15 (scraped example separator; score 0) ---
    def test_install_with_java_home(self):
        """Install succeeds when the topology specifies a relocated java_home."""
        installer = StandalonePrestoInstaller(self)

        # Move the JDK and point the topology's java_home at its new location.
        with relocate_jdk_directory(self.cluster, '/usr') as new_java_home:
            topology = {
                "coordinator": "master",
                "workers": ["slave1", "slave2", "slave3"],
                "java_home": new_java_home
            }
            self.upload_topology(topology)
            self.cluster.write_content_to_host(
                'connector.name=jmx',
                os.path.join(get_catalog_directory(), 'jmx.properties'),
                self.cluster.master)

            cmd_output = installer.install()
            expected = self.format_err_msgs_with_internal_hosts(
                installed_all_hosts_output)

            actual = cmd_output.splitlines()
            self.assertRegexpMatchesLineByLine(actual, expected)

            for host in self.cluster.all_hosts():
                installer.assert_installed(self, host)
                self.assert_has_default_config(host)
                self.assert_has_default_catalog(host)
                self.assert_has_jmx_catalog(host)
    def test_connection_to_coord_lost(self):
        """Install proceeds on reachable workers when the coordinator is down."""
        installer = StandalonePrestoInstaller(self)
        down_node = self.cluster.internal_slaves[0]
        topology = {
            "coordinator":
            down_node,
            "workers": [
                self.cluster.internal_master, self.cluster.internal_slaves[1],
                self.cluster.internal_slaves[2]
            ]
        }
        self.upload_topology(topology=topology)
        # Take the designated coordinator offline before installing.
        self.cluster.stop_host(self.cluster.slaves[0])

        actual_out = installer.install(coordinator=down_node,
                                       pa_raise_error=False)

        self.assertRegexpMatches(actual_out,
                                 self.down_node_connection_error(down_node))

        # Reachable hosts should still receive worker configs for slave1.
        for host in [
                self.cluster.master, self.cluster.slaves[1],
                self.cluster.slaves[2]
        ]:
            self.assert_common_configs(host)
            self.assert_file_content(host, '/etc/presto/config.properties',
                                     self.default_workers_config_with_slave1_)
    def test_install_when_connector_json_exists(self):
        """Install deploys a pre-existing jmx connector alongside the defaults."""
        installer = StandalonePrestoInstaller(self)
        topology = {"coordinator": "master", "workers": ["slave1"]}
        self.upload_topology(topology)
        # Pre-seed a jmx connector config on the master before installing.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)

        cmd_output = installer.install()
        expected = [
            'Deploying rpm on master...', 'Deploying rpm on slave1...',
            'Package deployed successfully on: slave1',
            'Package installed successfully on: slave1',
            'Package deployed successfully on: master',
            'Package installed successfully on: master',
            'Deploying configuration on: master',
            'Deploying jmx.properties, tpch.properties '
            'connector configurations on: master ',
            'Deploying configuration on: slave1',
            'Deploying jmx.properties, tpch.properties '
            'connector configurations on: slave1 '
        ]

        actual = cmd_output.splitlines()
        # Order-insensitive comparison of the output lines.
        self.assertEqual(sorted(expected), sorted(actual))

        for container in [self.cluster.master, self.cluster.slaves[0]]:
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
            self.assert_has_jmx_connector(container)
    def test_install_when_connector_json_exists(self):
        """Install deploys a pre-existing jmx connector alongside the defaults.

        Uses the dummy installer variant (dummy=True) — presumably a stand-in
        rpm; confirm against StandalonePrestoInstaller.
        """
        installer = StandalonePrestoInstaller(self, dummy=True)
        topology = {"coordinator": "master",
                    "workers": ["slave1"]}
        self.upload_topology(topology)
        # Pre-seed a jmx connector config on the master before installing.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )

        cmd_output = installer.install()
        expected = ['Deploying rpm on master...',
                    'Deploying rpm on slave1...',
                    'Package deployed successfully on: slave1',
                    'Package installed successfully on: slave1',
                    'Package deployed successfully on: master',
                    'Package installed successfully on: master',
                    'Deploying configuration on: master',
                    'Deploying jmx.properties, tpch.properties '
                    'connector configurations on: master ',
                    'Deploying configuration on: slave1',
                    'Deploying jmx.properties, tpch.properties '
                    'connector configurations on: slave1 ']

        actual = cmd_output.splitlines()
        # Order-insensitive comparison of the output lines.
        self.assertEqual(sorted(expected), sorted(actual))

        for container in [self.cluster.master,
                          self.cluster.slaves[0]]:
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
            self.assert_has_jmx_connector(container)
# --- Example 19 (scraped example separator; score 0) ---
    def test_server_starts_java8_in_bin_java(self):
        """No JAVA8_HOME warning when java8 is reachable via /bin/java."""
        installer = StandalonePrestoInstaller(self)
        # Relocate the JDK and symlink its java binary to /bin/java on all hosts.
        for container in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(container,
                                          'mv /usr/java/jdk1.8.0_40 /usr')
            self.cluster.exec_cmd_on_host(container,
                                          'ln -s /usr/jdk1.8.0_40/bin/java '
                                          '/bin/java')
        self.upload_topology()

        installer.install()

        # starts successfully with java8_home set
        output = self.run_prestoadmin('server start')
        self.assertFalse(
            'Warning: No value found for JAVA8_HOME. Default Java will be '
            'used.' in output)
# --- Example 20 (scraped example separator; score 0) ---
    def test_server_starts_java8_in_bin_java(self):
        """No JAVA8_HOME warning when java8 is reachable via /bin/java."""
        installer = StandalonePrestoInstaller(self)
        # Relocate the JDK and symlink its java binary to /bin/java on all hosts.
        for container in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(container,
                                          'mv /usr/java/jdk1.8.0_40 /usr')
            self.cluster.exec_cmd_on_host(
                container, 'ln -s /usr/jdk1.8.0_40/bin/java '
                '/bin/java')
        self.upload_topology()

        installer.install()

        # starts successfully with java8_home set
        output = self.run_prestoadmin('server start')
        self.assertFalse(
            'Warning: No value found for JAVA8_HOME. Default Java will be '
            'used.' in output)
# --- Example 21 (scraped example separator; score 0) ---
    def test_query_info_pa_separate_node(self):
        """`collect query_info <id>` writes the query JSON file on the master."""
        installer = StandalonePrestoInstaller(self)
        self.setup_cluster(NoHadoopBareImageProvider, STANDALONE_PA_CLUSTER)
        topology = {"coordinator": "slave1",
                    "workers": ["slave2", "slave3"]}
        self.upload_topology(topology=topology)
        installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        sql_to_run = 'SELECT * FROM system.runtime.nodes WHERE 1234 = 1234'
        # Retry: the server may not accept queries immediately after start.
        query_id = self.retry(
            lambda: self.get_query_id(sql_to_run, host=self.cluster.slaves[0]))

        actual = self.run_prestoadmin('collect query_info ' + query_id)
        query_info_file_name = path.join(TMP_PRESTO_DEBUG, 'query_info_' + query_id + '.json')

        expected = 'Gathered query information in file: ' + query_info_file_name + '\n'
        self.assert_path_exists(self.cluster.master, query_info_file_name)
        self.assertEqual(actual, expected)
# --- Example 22 (scraped example separator; score 0) ---
    def test_server_starts_java8_in_bin_java(self):
        """No JAVA8_HOME warning when the relocated JDK is linked at /bin/java."""
        installer = StandalonePrestoInstaller(self)

        with relocate_jdk_directory(self.cluster, '/usr') as new_java_home:
            java_bin = os.path.join(new_java_home, 'bin', 'java')

            # Make the relocated java the system java on every host.
            for container in self.cluster.all_hosts():
                self.cluster.exec_cmd_on_host(
                    container, 'ln -s %s /bin/java' % (java_bin,))

            self.upload_topology()

            installer.install()

            # starts successfully with java8_home set
            output = self.run_prestoadmin('server start')
            self.assertFalse(
                'Warning: No value found for JAVA8_HOME. Default Java will be '
                'used.' in output)
# --- Example 23 (scraped example separator; score 0) ---
    def test_server_starts_java_in_bin_java(self):
        """No JAVA_HOME warning when the relocated JDK is linked at /bin/java."""
        installer = StandalonePrestoInstaller(self)

        with relocate_jdk_directory(self.cluster, '/usr') as new_java_home:
            java_bin = os.path.join(new_java_home, 'bin', 'java')

            # Make the relocated java the system java on every host.
            for container in self.cluster.all_hosts():
                self.cluster.exec_cmd_on_host(
                    container, 'ln -s %s /bin/java' % (java_bin, ))

            self.upload_topology()

            installer.install()

            # starts successfully with java_home set
            output = self.run_prestoadmin('server start')
            self.assertFalse(
                'Warning: No value found for JAVA_HOME. Default Java will be '
                'used.' in output)
# --- Example 24 (scraped example separator; score 0) ---
    def test_query_info_pa_separate_node(self):
        """`collect query_info <id>` writes the query JSON file on the master."""
        installer = StandalonePrestoInstaller(self)
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        topology = {"coordinator": "slave1", "workers": ["slave2", "slave3"]}
        self.upload_topology(topology=topology)
        installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        sql_to_run = 'SELECT * FROM system.runtime.nodes WHERE 1234 = 1234'
        # NOTE(review): settings/roledefs looks like Fabric env scoping for the
        # coordinator role — confirm. Retry because the server may not accept
        # queries immediately after start.
        with settings(roledefs={'coordinator': ['slave1']}):
            query_id = self.retry(lambda: self.get_query_id(
                sql_to_run, host=self.cluster.slaves[0]))

        actual = self.run_prestoadmin('collect query_info ' + query_id)
        query_info_file_name = path.join(TMP_PRESTO_DEBUG,
                                         'query_info_' + query_id + '.json')

        expected = 'Gathered query information in file: ' + query_info_file_name + '\n'
        self.assert_path_exists(self.cluster.master, query_info_file_name)
        self.assertEqual(actual, expected)
    def test_install_ext_host_is_pa_master(self):
        """Install with an external coordinator while PA runs on the master."""
        installer = StandalonePrestoInstaller(self)
        ext_topology = {"coordinator": "slave1",
                        "workers": ["slave2", "slave3"]}
        self.upload_topology(ext_topology)

        install_output = installer.install(coordinator='slave1')

        self.assertRegexpMatchesLineByLine(
            install_output.splitlines(),
            install_with_ext_host_pa_master_out)

        self.assert_installed_with_configs(
            self.cluster.slaves[0],
            [self.cluster.slaves[1], self.cluster.slaves[2]])
# --- Example 26 (scraped example separator; score 0) ---
    def test_install_with_malformed_connector(self):
        """Install reports a clear error for a config missing connector.name."""
        installer = StandalonePrestoInstaller(self)
        self.upload_topology()
        # Intentionally malformed: wrong key name and ':' instead of '='.
        self.cluster.write_content_to_host(
            'connectr.typo:invalid',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)
        actual_out = installer.install(pa_raise_error=False)
        expected = 'Underlying exception:\n    Catalog configuration ' \
                   'jmx.properties does not contain connector.name'
        self.assertRegexpMatches(actual_out, expected)

        # Install itself still completes on every host despite the bad catalog.
        for container in self.cluster.all_hosts():
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
# --- Example 27 (scraped example separator; score 0) ---
    def test_install_when_topology_has_ips(self):
        """Install works when the topology lists raw IP addresses, not hostnames."""
        installer = StandalonePrestoInstaller(self)
        ips = self.cluster.get_ip_address_dict()
        topology = {"coordinator": ips[self.cluster.master],
                    "workers": [ips[self.cluster.slaves[0]]]}
        self.upload_topology(topology)
        # Pre-seed a jmx connector config on the master before installing.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )

        cmd_output = installer.install().splitlines()
        # Expected regexes built with the hosts' IPs in place of names.
        expected = [
            r'Deploying rpm on %s...' % ips[self.cluster.master],
            r'Deploying rpm on %s...' % ips[self.cluster.slaves[0]],
            r'Package deployed successfully on: ' + ips[
                self.cluster.master],
            r'Package installed successfully on: ' + ips[
                self.cluster.master],
            r'Package deployed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Package installed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Deploying configuration on: ' +
            ips[self.cluster.master],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' +
            ips[self.cluster.master] + r' ',
            r'Deploying configuration on: ' +
            ips[self.cluster.slaves[0]],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' +
            ips[self.cluster.slaves[0]] + r' ',
            r'Using rpm_specifier as a local path',
            r'Fetching local presto rpm at path: .*',
            r'Found existing rpm at: .*']

        # Sort both sides so the line-by-line regex match is order-insensitive.
        cmd_output.sort()
        expected.sort()
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        self.assert_installed_with_regex_configs(
            self.cluster.master,
            [self.cluster.slaves[0]])
        for container in [self.cluster.master,
                          self.cluster.slaves[0]]:
            self.assert_has_jmx_connector(container)
    def test_install_when_topology_has_ips(self):
        """Install works when the topology lists raw IP addresses, not hostnames."""
        installer = StandalonePrestoInstaller(self)
        ips = self.cluster.get_ip_address_dict()
        topology = {
            "coordinator": ips[self.cluster.internal_master],
            "workers": [ips[self.cluster.internal_slaves[0]]]
        }
        self.upload_topology(topology)
        # Pre-seed a jmx catalog config on the master before installing.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(get_catalog_directory(), 'jmx.properties'),
            self.cluster.master)

        cmd_output = installer.install().splitlines()
        # Expected regexes built with the hosts' internal IPs.
        expected = [
            r'Deploying rpm on %s...' % ips[self.cluster.internal_master],
            r'Deploying rpm on %s...' % ips[self.cluster.internal_slaves[0]],
            r'Package deployed successfully on: ' +
            ips[self.cluster.internal_master],
            r'Package installed successfully on: ' +
            ips[self.cluster.internal_master],
            r'Package deployed successfully on: ' +
            ips[self.cluster.internal_slaves[0]],
            r'Package installed successfully on: ' +
            ips[self.cluster.internal_slaves[0]],
            r'Deploying configuration on: ' +
            ips[self.cluster.internal_master],
            r'Deploying jmx.properties, tpch.properties '
            r'catalog configurations on: ' +
            ips[self.cluster.internal_master] + r' ',
            r'Deploying configuration on: ' +
            ips[self.cluster.internal_slaves[0]],
            r'Deploying jmx.properties, tpch.properties '
            r'catalog configurations on: ' +
            ips[self.cluster.internal_slaves[0]] + r' ',
            r'Using rpm_specifier as a local path',
            r'Fetching local presto rpm at path: .*',
            r'Found existing rpm at: .*'
        ]

        # Sort both sides so the line-by-line regex match is order-insensitive.
        cmd_output.sort()
        expected.sort()
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        self.assert_installed_with_regex_configs(self.cluster.master,
                                                 [self.cluster.slaves[0]])
        for host in [self.cluster.master, self.cluster.slaves[0]]:
            self.assert_has_jmx_catalog(host)
# --- Example 29 (scraped example separator; score 0) ---
    def test_install_ext_host_is_pa_master(self):
        """Install with an external coordinator while PA runs on the master."""
        installer = StandalonePrestoInstaller(self)
        external_topology = {"coordinator": "slave1",
                             "workers": ["slave2", "slave3"]}
        self.upload_topology(external_topology)

        install_output = installer.install(coordinator='slave1')

        self.assertRegexpMatchesLineByLine(
            install_output.splitlines(),
            install_with_ext_host_pa_master_out)

        self.assert_installed_with_configs(
            self.cluster.slaves[0],
            [self.cluster.slaves[1], self.cluster.slaves[2]])
# --- Example 30 (scraped example separator; score 0) ---
    def test_install(self, installer=None):
        """Plain install on the full cluster leaves every host configured."""
        if installer is None:
            installer = StandalonePrestoInstaller(self)

        self.upload_topology()

        install_output = installer.install()

        self.assertRegexpMatchesLineByLine(install_output.splitlines(),
                                           installed_all_hosts_output)

        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
# --- Example 31 (scraped example separator; score 0) ---
    def test_install_fails_java_not_found(self):
        """With the JDK relocated, install fails on all 4 hosts; nothing installs."""
        installer = StandalonePrestoInstaller(self)
        with relocate_jdk_directory(self.cluster, '/usr'):
            self.upload_topology()
            cmd_output = installer.install(pa_raise_error=False)
            actual = cmd_output.splitlines()
            # Count the per-host failure messages. The original iterated
            # enumerate(actual) and substring-searched str((index, line)),
            # which only matched via the tuple's repr; search each line
            # directly instead.
            num_failures = sum(
                1 for line in actual
                if 'Error: Required Java version could not be found' in line)

            self.assertEqual(4, num_failures)

            for container in self.cluster.all_hosts():
                installer.assert_uninstalled(container)
    def test_install_with_malformed_connector(self):
        """Install reports a clear error for a config missing connector.name."""
        installer = StandalonePrestoInstaller(self, dummy=True)
        self.upload_topology()
        # Intentionally malformed: wrong key name and ':' instead of '='.
        self.cluster.write_content_to_host(
            'connectr.typo:invalid',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )
        actual_out = installer.install(pa_raise_error=False)
        expected = 'Underlying exception:\n    Catalog configuration ' \
                   'jmx.properties does not contain connector.name'
        self.assertRegexpMatches(actual_out, expected)

        # Install itself still completes on every host despite the bad catalog.
        for container in self.cluster.all_hosts():
            installer.assert_installed(self, container)
            self.assert_has_default_config(container)
Esempio n. 33
0
    def test_install_fails_java8_not_found(self):
        """Install must fail on all four hosts when Java 8 is not found.

        After relocating the JDK, the installer output should report the
        missing-Java error once per host and leave every host uninstalled.
        """
        installer = StandalonePrestoInstaller(self)
        with relocate_jdk_directory(self.cluster, '/usr'):
            self.upload_topology()
            cmd_output = installer.install(pa_raise_error=False)
            actual = cmd_output.splitlines()
            # Search each output line directly; the original looped over
            # enumerate() tuples and matched against str() of the tuple,
            # which worked only by accident of tuple repr.
            num_failures = sum(
                1 for line in actual
                if 'Error: Required Java version could not be found' in line)

            self.assertEqual(4, num_failures)

            for container in self.cluster.all_hosts():
                installer.assert_uninstalled(container)
    def test_install(self, installer=None):
        """Install presto and check all hosts got the default setup.

        The optional installer argument lets callers reuse this method as
        the first step of a multi-install scenario.
        """
        if installer is None:
            installer = StandalonePrestoInstaller(self)

        self.upload_topology()

        actual_lines = installer.install().splitlines()
        # Order of per-host messages is nondeterministic; compare sorted.
        self.assertEqual(sorted(installed_all_hosts_output),
                         sorted(actual_lines))

        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
    def test_install_worker_is_pa_master(self):
        """Install with the coordinator on slave1 and master as a worker."""
        installer = StandalonePrestoInstaller(self)
        self.upload_topology({
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"]
        })

        actual = installer.install(coordinator='slave1').splitlines()
        # Per-host message order is nondeterministic; compare sorted.
        self.assertEqual(sorted(install_with_worker_pa_master_out),
                         sorted(actual))

        self.assert_installed_with_configs(
            self.cluster.slaves[0],
            [self.cluster.slaves[1], self.cluster.slaves[2],
             self.cluster.master])
    def test_install_worker_is_pa_master(self):
        """Dummy install with coordinator on slave1, master as a worker."""
        installer = StandalonePrestoInstaller(self, dummy=True)
        topology = {
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"],
        }
        self.upload_topology(topology)

        install_lines = installer.install(coordinator='slave1').splitlines()
        # Message order varies per run; compare sorted line lists.
        self.assertEqual(sorted(install_with_worker_pa_master_out),
                         sorted(install_lines))

        workers = [self.cluster.slaves[1],
                   self.cluster.slaves[2],
                   self.cluster.master]
        self.assert_installed_with_configs(self.cluster.slaves[0], workers)
Esempio n. 37
0
    def test_install_twice(self):
        """A second install over an existing one must report the right errors."""
        installer = StandalonePrestoInstaller(self)
        self.test_install(installer=installer)
        # The second install is expected to fail; capture its output.
        output = installer.install(pa_raise_error=False)

        self.default_keywords.update(installer.get_keywords())

        resource_path = os.path.join(LOCAL_RESOURCES_DIR, 'install_twice.txt')
        with open(resource_path, 'r') as f:
            raw_expected = f.read()
        expected = self.escape_for_regex(self.replace_keywords(raw_expected))

        self.assertRegexpMatchesLineByLine(output.splitlines(),
                                           expected.splitlines())
        # The original install must remain intact after the failed retry.
        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
    def test_install_failure_without_java8_home(self):
        """Dummy install fails on all hosts when the JDK has been moved.

        The JDK is moved out of /usr/java on every host without providing
        java8_home; the install must report the missing-Java error once
        per host (4 hosts) and leave every host uninstalled.
        """
        installer = StandalonePrestoInstaller(self, dummy=True)
        for container in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(container,
                                          "mv /usr/java/jdk1.8.0_40 /usr/")
        self.upload_topology()
        cmd_output = installer.install(pa_raise_error=False)
        actual = cmd_output.splitlines()
        # Count matches on the lines themselves; the original iterated
        # enumerate() tuples and searched str() of the tuple.
        num_failures = sum(
            1 for line in actual
            if 'Error: Required Java version could not be found' in line)

        self.assertEqual(4, num_failures)

        for container in self.cluster.all_hosts():
            installer.assert_uninstalled(container, dummy=True)
Esempio n. 39
0
    def test_install_with_java8_home(self):
        """Install succeeds when java8_home in the topology locates the JDK."""
        installer = StandalonePrestoInstaller(self)

        with relocate_jdk_directory(self.cluster, '/usr') as new_java_home:
            self.upload_topology({"coordinator": "master",
                                  "workers": ["slave1", "slave2", "slave3"],
                                  "java8_home": new_java_home})

            expected = self.format_err_msgs_with_internal_hosts(
                installed_all_hosts_output)
            actual = installer.install().splitlines()
            self.assertRegexpMatchesLineByLine(actual, expected)

            for host in self.cluster.all_hosts():
                installer.assert_installed(self, host)
                self.assert_has_default_config(host)
                self.assert_has_default_connector(host)
    def test_install_twice(self):
        """Dummy variant: a repeat install must emit the expected errors."""
        installer = StandalonePrestoInstaller(self, dummy=True)
        self.test_install(installer=installer)
        output = installer.install(pa_raise_error=False)

        self.default_keywords.update(installer.get_keywords())

        resource = os.path.join(LOCAL_RESOURCES_DIR, 'install_twice.txt')
        with open(resource, 'r') as f:
            raw_expected = f.read()
        expected = self.escape_for_regex(self.replace_keywords(raw_expected))

        self.assertRegexpMatchesLineByLine(output.splitlines(),
                                           expected.splitlines())
        # The first install must still be fully intact.
        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
Esempio n. 41
0
    def test_install_with_java8_home(self):
        """Install using an explicit java8_home after relocating the JDK."""
        installer = StandalonePrestoInstaller(self)
        new_java_home = relocate_default_java(self.cluster, '/usr')

        self.upload_topology({"coordinator": "master",
                              "workers": ["slave1", "slave2", "slave3"],
                              "java8_home": new_java_home})

        actual = installer.install().splitlines()
        self.assertRegexpMatchesLineByLine(actual,
                                           installed_all_hosts_output)

        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
    def test_install_with_java8_home(self):
        """Dummy install with java8_home pointing at a manually moved JDK."""
        installer = StandalonePrestoInstaller(self, dummy=True)
        # Move the JDK away from its default location on every host so
        # only the explicit java8_home setting can locate it.
        for host in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(host,
                                          "mv /usr/java/jdk1.8.0_40 /usr/")
        self.upload_topology({"coordinator": "master",
                              "workers": ["slave1", "slave2", "slave3"],
                              "java8_home": "/usr/jdk1.8.0_40/jre"})

        actual = installer.install().splitlines()
        # Per-host message order varies; compare sorted line lists.
        self.assertEqual(sorted(installed_all_hosts_output), sorted(actual))

        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
Esempio n. 43
0
    def test_install_with_java8_home(self):
        """Install with java8_home set in the uploaded topology."""
        installer = StandalonePrestoInstaller(self)
        new_java_home = relocate_default_java(self.cluster, '/usr')

        topology = {"coordinator": "master",
                    "workers": ["slave1", "slave2", "slave3"],
                    "java8_home": new_java_home}
        self.upload_topology(topology)

        output_lines = installer.install().splitlines()
        self.assertRegexpMatchesLineByLine(output_lines,
                                           installed_all_hosts_output)

        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
    def test_install_with_java8_home(self):
        """Install with java8_home after manually moving the JDK on all hosts."""
        installer = StandalonePrestoInstaller(self)
        # Relocate the JDK so only the explicit java8_home can find it.
        for host in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(host,
                                          "mv /usr/java/jdk1.8.0_40 /usr/")
        self.upload_topology({"coordinator": "master",
                              "workers": ["slave1", "slave2", "slave3"],
                              "java8_home": "/usr/jdk1.8.0_40/jre"})

        install_lines = installer.install().splitlines()
        # Message order is nondeterministic; compare sorted.
        self.assertEqual(sorted(installed_all_hosts_output),
                         sorted(install_lines))

        for host in self.cluster.all_hosts():
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
    def test_install_when_topology_has_ips(self):
        """Install with a topology that uses raw IP addresses.

        Output messages must reference the IPs, and the jmx connector
        written before the install must be deployed to both hosts.
        """
        installer = StandalonePrestoInstaller(self)
        ips = self.cluster.get_ip_address_dict()
        topology = {
            "coordinator": ips[self.cluster.master],
            "workers": [ips[self.cluster.slaves[0]]]
        }
        self.upload_topology(topology)
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)

        cmd_output = installer.install().splitlines()
        expected = [
            r'Deploying rpm on %s...' % ips[self.cluster.master],
            r'Deploying rpm on %s...' % ips[self.cluster.slaves[0]],
            r'Package deployed successfully on: ' + ips[self.cluster.master],
            r'Package installed successfully on: ' + ips[self.cluster.master],
            r'Package deployed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Package installed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Deploying configuration on: ' + ips[self.cluster.master],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' + ips[self.cluster.master],
            r'Deploying configuration on: ' + ips[self.cluster.slaves[0]],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' + ips[self.cluster.slaves[0]]
        ]

        cmd_output.sort()
        expected.sort()
        # The helper takes (actual_lines, expected_regexes); the original
        # call passed them in the opposite order, matching output lines as
        # regex patterns against the expected text.
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        self.assert_installed_with_regex_configs(self.cluster.master,
                                                 [self.cluster.slaves[0]])
        for container in [self.cluster.master, self.cluster.slaves[0]]:
            self.assert_has_jmx_connector(container)
Esempio n. 46
0
    def test_install_when_connector_json_exists(self):
        """A pre-existing jmx connector file is deployed during install."""
        installer = StandalonePrestoInstaller(self)
        self.upload_topology({"coordinator": "master",
                              "workers": ["slave1"]})
        # Add a jmx connector before installing so it ships with the rest.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(get_connectors_directory(), 'jmx.properties'),
            self.cluster.master
        )

        expected = ['Deploying rpm on master...',
                    'Deploying rpm on slave1...',
                    'Package deployed successfully on: slave1',
                    'Package installed successfully on: slave1',
                    'Package deployed successfully on: master',
                    'Package installed successfully on: master',
                    'Deploying configuration on: master',
                    'Deploying jmx.properties, tpch.properties '
                    'connector configurations on: master ',
                    'Deploying configuration on: slave1',
                    'Deploying jmx.properties, tpch.properties '
                    'connector configurations on: slave1 ',
                    'Using rpm_specifier as a local path',
                    'Fetching local presto rpm at path: .*',
                    'Found existing rpm at: .*']

        actual = installer.install().splitlines()
        self.assertRegexpMatchesLineByLine(actual, expected)

        for host in [self.cluster.master,
                     self.cluster.slaves[0]]:
            installer.assert_installed(self, host)
            self.assert_has_default_config(host)
            self.assert_has_default_connector(host)
            self.assert_has_jmx_connector(host)
Esempio n. 47
0
class TestStatus(BaseProductTestCase):
    """Product tests for the 'server status' presto-admin command."""

    def setUp(self):
        super(TestStatus, self).setUp()
        self.installer = StandalonePrestoInstaller(self)

    def test_status_uninstalled(self):
        """Status on a presto-admin-only cluster reports 'not installed'."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.upload_topology()
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_installed_status())

    def test_status_not_started(self):
        """Status on an installed but stopped cluster reports 'Not Running'."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    @attr('smoketest')
    def test_status_happy_path(self):
        """Status on a fully started cluster reports every node running."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.base_status())

    def test_status_only_coordinator(self):
        """Only the coordinator is started; it alone should show running."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H master')
        # don't run with retries because it won't be able to query the
        # coordinator because the coordinator is set to not be a worker
        status_output = self.run_prestoadmin('server status')
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_master)
        )

    def test_status_only_worker(self):
        """Only slave1 is started; it alone should show running."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H slave1')
        status_output = self._server_status_with_retries()
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_slaves[0])
        )

        # Check that the slave sees that it's stopped, even though the
        # discovery server is not up.
        self.run_prestoadmin('server stop')
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    def test_connection_to_coordinator_lost(self):
        """Status degrades gracefully when the coordinator host goes down."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        topology = {"coordinator": "slave1", "workers":
                    ["master", "slave2", "slave3"]}
        self.upload_topology(topology=topology)
        self.installer.install()
        self.run_prestoadmin('server start')
        self.cluster.stop_host(
            self.cluster.slaves[0])
        topology = {"coordinator": self.cluster.get_down_hostname("slave1"),
                    "workers": ["master", "slave2", "slave3"]}
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[0],
            coordinator_down=True)
        self.check_status(status_output, statuses)

    def test_connection_to_worker_lost(self):
        """Status reports one unavailable worker when its host goes down."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        topology = {"coordinator": "slave1", "workers":
                    ["master", "slave2", "slave3"]}
        self.upload_topology(topology=topology)
        self.installer.install()
        self.run_prestoadmin('server start')
        self.cluster.stop_host(
            self.cluster.slaves[1])
        topology = {"coordinator": "slave1", "workers":
                    ["master", self.cluster.get_down_hostname("slave2"),
                     "slave3"]}
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[1])
        self.check_status(status_output, statuses)

    def test_status_port_not_8080(self):
        """Status works against a server configured on a non-default port."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.upload_topology()

        port_config = """discovery.uri=http://master:8090
http-server.http.port=8090"""

        self.installer.install(extra_configs=port_config)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries()

        self.check_status(status_output, self.base_status(), 8090)

    def base_status(self, topology=None):
        """Return the expected per-node status dicts for a healthy cluster.

        topology defaults to the standard layout (master as coordinator,
        the three slaves as workers).
        """
        ips = self.cluster.get_ip_address_dict()
        if not topology:
            topology = {
                'coordinator': self.cluster.internal_master, 'workers':
                [self.cluster.internal_slaves[0],
                 self.cluster.internal_slaves[1],
                 self.cluster.internal_slaves[2]]
            }
        statuses = []
        hosts_in_status = [topology['coordinator']] + topology['workers'][:]
        for host in hosts_in_status:
            # Compare by value: the original used 'is', which relied on the
            # exact same string object appearing in both places.
            role = 'coordinator' if host == topology['coordinator']\
                else 'worker'
            status = {'host': host, 'role': role, 'ip': ips[host],
                      'is_running': 'Running'}
            statuses += [status]
        return statuses

    def not_started_status(self):
        """Expected statuses when presto is installed but not running."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tNo information available: ' \
                                      'unable to query coordinator'
        return statuses

    def not_installed_status(self):
        """Expected statuses when presto is not installed at all."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tPresto is not installed.'
        return statuses

    def single_node_up_status(self, node):
        """Expected statuses when only *node* is running."""
        statuses = self.not_started_status()
        for status in statuses:
            # Value comparison instead of the original identity check.
            if status['host'] == node:
                status['is_running'] = 'Running'
        return statuses

    def node_not_available_status(self, topology, node,
                                  coordinator_down=False):
        """Expected statuses when *node* is unreachable.

        If coordinator_down is True every other node also loses the
        coordinator-derived details.
        """
        statuses = self.base_status(topology)
        for status in statuses:
            if status['host'] == node:
                status['is_running'] = 'Not Running'
                status['error_message'] = \
                    self.status_node_connection_error(node)
                status['ip'] = 'Unknown'
                status['host'] = self.cluster.get_down_hostname(node)
            elif coordinator_down:
                status['error_message'] = '\tNo information available: ' \
                                          'unable to query coordinator'
                status['ip'] = 'Unknown'

        return statuses

    def check_status(self, cmd_output, statuses, port=8080):
        """Assert cmd_output matches the regexes built from *statuses*."""
        expected_output = []
        for status in statuses:
            expected_output += \
                ['Server Status:',
                 '\t%s\(IP: %s, Roles: %s\): %s' %
                 (status['host'], status['ip'], status['role'],
                  status['is_running'])]
            if 'error_message' in status and status['error_message']:
                expected_output += [status['error_message']]
            # '==' instead of 'is': literal comparison must not depend on
            # CPython string interning.
            elif status['is_running'] == 'Running':
                expected_output += \
                    ['\tNode URI\(http\): http://%s:%s' % (status['ip'],
                                                           str(port)),
                     '\tPresto Version: ' + PRESTO_VERSION,
                     '\tNode is active: True',
                     '\tConnectors:     system, tpch']

        self.assertRegexpMatches(cmd_output, '\n'.join(expected_output))

    def _server_status_with_retries(self):
        """Run 'server status', retrying until the coordinator settles."""
        return self.retry(lambda: self._get_status_until_coordinator_updated())

    def _get_status_until_coordinator_updated(self):
        """One status attempt; raise PrestoError if the coordinator lags."""
        status_output = self.run_prestoadmin('server status')
        if 'the coordinator has not yet discovered this node' in status_output:
            raise PrestoError('Coordinator has not discovered all nodes yet: '
                              '%s' % status_output)
        if 'Roles: coordinator): Running\n\tNo information available: ' \
           'unable to query coordinator' in status_output:
            raise PrestoError('Coordinator not started up properly yet.'
                              '\nOutput: %s' % status_output)
        return status_output
class TestServerUninstall(BaseProductTestCase):
    """Product tests for the 'server uninstall' presto-admin command."""

    def setUp(self):
        super(TestServerUninstall, self).setUp()
        self.installer = StandalonePrestoInstaller(self)

    @attr('smoketest')
    def test_uninstall(self):
        """Uninstall a running cluster: servers stop, files are removed."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           self.STANDALONE_PRESTO_CLUSTER)
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())
        self.assert_started(process_per_host)

        cmd_output = self.run_prestoadmin(
            'server uninstall', raise_error=False).splitlines()
        self.assert_stopped(process_per_host)
        expected = uninstall_output + self.expected_stop()[:]
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        for container in self.cluster.all_hosts():
            self.assert_uninstalled_dirs_removed(container)

    def assert_uninstalled_dirs_removed(self, container):
        """Assert the presto package and all install paths are gone."""
        self.installer.assert_uninstalled(container)
        self.assert_path_removed(container, '/etc/presto')
        self.assert_path_removed(container, '/usr/lib/presto')
        self.assert_path_removed(container, '/var/lib/presto')
        self.assert_path_removed(container, '/usr/shared/doc/presto')
        self.assert_path_removed(container, '/etc/init.d/presto')

    def test_uninstall_when_server_down(self):
        """Uninstall succeeds even when one worker was already stopped."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           self.STANDALONE_PRESTO_CLUSTER)
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())

        self.run_prestoadmin('server stop -H %s' %
                             self.cluster.internal_slaves[0])
        cmd_output = self.run_prestoadmin('server uninstall').splitlines()
        self.assert_stopped(process_per_host)
        expected = uninstall_output + self.expected_stop(
            not_running=[self.cluster.internal_slaves[0]])[:]
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        for container in self.cluster.all_hosts():
            self.assert_uninstalled_dirs_removed(container)

    def test_uninstall_twice(self):
        """A second uninstall reports the expected already-removed output."""
        self.test_uninstall()

        output = self.run_prestoadmin('server uninstall', raise_error=False)
        with open(os.path.join(LOCAL_RESOURCES_DIR, 'uninstall_twice.txt'),
                  'r') as f:
            expected = f.read()

        self.assertEqualIgnoringOrder(expected, output)

    def test_uninstall_lost_host(self):
        """Uninstall proceeds on reachable hosts when one host is down."""
        self.setup_cluster(NoHadoopBareImageProvider(), self.PA_ONLY_CLUSTER)
        pa_installer = PrestoadminInstaller(self)
        pa_installer.install()
        topology = {"coordinator": self.cluster.internal_slaves[0],
                    "workers": [self.cluster.internal_master,
                                self.cluster.internal_slaves[1],
                                self.cluster.internal_slaves[2]]}
        self.upload_topology(topology)
        self.installer.install()
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())
        self.assert_started(process_per_host)
        down_node = self.cluster.internal_slaves[0]
        self.cluster.stop_host(
            self.cluster.slaves[0])

        expected = self.down_node_connection_error(
            self.cluster.internal_slaves[0])
        cmd_output = self.run_prestoadmin('server uninstall',
                                          raise_error=False)
        self.assertRegexpMatches(cmd_output, expected)
        process_per_active_host = []
        for host, pid in process_per_host:
            # Compare hostnames for equality; the original used substring
            # containment ('host not in down_node'), which only worked
            # because no hostname here happens to be a substring of another.
            if host != down_node:
                process_per_active_host.append((host, pid))
        self.assert_stopped(process_per_active_host)

        for container in [self.cluster.internal_master,
                          self.cluster.internal_slaves[1],
                          self.cluster.internal_slaves[2]]:
            self.assert_uninstalled_dirs_removed(container)

    def test_uninstall_with_dir_readonly(self):
        """Uninstall skips the host whose presto dir was made read-only."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           self.STANDALONE_PRESTO_CLUSTER)
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())
        self.assert_started(process_per_host)

        self.run_script_from_prestoadmin_dir("chmod 500 -R /usr/lib/presto")
        self.run_prestoadmin('server uninstall', raise_error=False)

        # The master node was not able to be stopped or uninstalled because
        # the permissions of the directory were changed such that the
        # stop command can't run
        pid_to_remove = None
        for (host, pid) in process_per_host:
            if host == self.cluster.internal_master:
                pid_to_remove = pid
        process_per_host.remove((self.cluster.internal_master, pid_to_remove))
        self.assert_stopped(process_per_host)

        uninstalled_hosts = self.cluster.all_hosts()[:]
        uninstalled_hosts.remove(self.cluster.master)

        for container in uninstalled_hosts:
            self.assert_uninstalled_dirs_removed(container)

        self.installer.assert_installed(self, container=self.cluster.master)

    @docker_only
    def test_uninstall_as_non_sudo(self):
        """Uninstall run as a non-sudo user produces the expected failure."""
        self.setup_cluster(NoHadoopBareImageProvider(), self.PA_ONLY_CLUSTER)
        self.upload_topology()
        self.installer.install()

        script = './presto-admin server uninstall -u testuser -p testpass'
        output = self.run_script_from_prestoadmin_dir(script)
        with open(os.path.join(LOCAL_RESOURCES_DIR, 'non_sudo_uninstall.txt'),
                  'r') as f:
            expected = f.read()

        self.assertEqualIgnoringOrder(expected, output)
Esempio n. 49
0
class TestStatus(BaseProductTestCase):
    """Product tests for the 'server status' command.

    Each test brings the cluster into a known state, runs 'server status'
    (with retries where the coordinator needs time to discover nodes) and
    matches the output against an expected per-node status list.
    """

    def setUp(self):
        super(TestStatus, self).setUp()
        self.installer = StandalonePrestoInstaller(self)

    def test_status_uninstalled(self):
        """Status on a cluster with no Presto reports 'not installed'."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        self.upload_topology()
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_installed_status())

    def test_status_not_started(self):
        """Status on an installed but stopped cluster reports 'not running'."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    @attr('smoketest')
    def test_status_happy_path(self):
        """All nodes running: every node reports 'Running' with full details."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries(check_connectors=True)
        self.check_status(status_output, self.base_status())

    def test_status_only_coordinator(self):
        """Only the coordinator is started; workers report 'not running'."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H master')
        # don't run with retries because it won't be able to query the
        # coordinator because the coordinator is set to not be a worker
        status_output = self.run_prestoadmin('server status')
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_master)
        )

    def test_status_only_worker(self):
        """Only one worker is started; it is the only node reported running."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H slave1')
        status_output = self._server_status_with_retries()
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_slaves[0])
        )

        # Check that the slave sees that it's stopped, even though the
        # discovery server is not up.
        self.run_prestoadmin('server stop')
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    def test_connection_to_coordinator_lost(self):
        """Coordinator host down: all nodes lose their status information."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        topology = {"coordinator": "slave1", "workers":
                    ["master", "slave2", "slave3"]}
        self.upload_topology(topology=topology)
        self.installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        self.cluster.stop_host(
            self.cluster.slaves[0])
        topology = {"coordinator": self.cluster.get_down_hostname("slave1"),
                    "workers": ["master", "slave2", "slave3"]}
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[0],
            coordinator_down=True)
        self.check_status(status_output, statuses)

    def test_connection_to_worker_lost(self):
        """One worker host down: only that node loses status information."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        topology = {"coordinator": "slave1", "workers":
                    ["master", "slave2", "slave3"]}
        self.upload_topology(topology=topology)
        self.installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        self.cluster.stop_host(
            self.cluster.slaves[1])
        topology = {"coordinator": "slave1", "workers":
                    ["master", self.cluster.get_down_hostname("slave2"),
                     "slave3"]}
        status_output = self._server_status_with_retries(check_connectors=True)
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[1])
        self.check_status(status_output, statuses)

    def test_status_port_not_8080(self):
        """Status works when the HTTP port is changed from the 8080 default."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        self.upload_topology()

        port_config = """discovery.uri=http://master:8090
http-server.http.port=8090"""

        self.installer.install(extra_configs=port_config)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries(check_connectors=True)

        self.check_status(status_output, self.base_status(), 8090)

    def test_status_non_root_user(self):
        """Status works when presto-admin connects as a non-root user."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
        self.upload_topology(
            {"coordinator": "master",
             "workers": ["slave1", "slave2", "slave3"],
             "username": "******"}
        )
        self.run_prestoadmin('server start -p password')
        status_output = self._server_status_with_retries(check_connectors=True, extra_arguments=' -p password')
        self.check_status(status_output, self.base_status())

    def base_status(self, topology=None):
        """Return the expected per-node status dicts for a healthy cluster."""
        ips = self.cluster.get_ip_address_dict()
        if not topology:
            topology = {
                'coordinator': self.cluster.internal_master, 'workers':
                [self.cluster.internal_slaves[0],
                 self.cluster.internal_slaves[1],
                 self.cluster.internal_slaves[2]]
            }
        statuses = []
        hosts_in_status = [topology['coordinator']] + topology['workers'][:]
        for host in hosts_in_status:
            # Compare by value: using 'is' here relied on string interning
            # and could mis-classify an equal-but-distinct hostname string.
            role = 'coordinator' if host == topology['coordinator']\
                else 'worker'
            status = {'host': host, 'role': role, 'ip': ips[host],
                      'is_running': 'Running'}
            statuses += [status]
        return statuses

    def not_started_status(self):
        """Expected statuses when Presto is installed but not running."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tNo information available: ' \
                                      'unable to query coordinator'
        return statuses

    def not_installed_status(self):
        """Expected statuses when Presto is not installed at all."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tPresto is not installed.'
        return statuses

    def single_node_up_status(self, node):
        """Expected statuses when only *node* is running."""
        statuses = self.not_started_status()
        for status in statuses:
            # Value comparison, not identity: the caller may pass an equal
            # string that is a different object.
            if status['host'] == node:
                status['is_running'] = 'Running'
        return statuses

    def node_not_available_status(self, topology, node,
                                  coordinator_down=False):
        """Expected statuses when *node* (optionally the coordinator) is down."""
        statuses = self.base_status(topology)
        for status in statuses:
            if status['host'] == node:
                status['is_running'] = 'Not Running'
                status['error_message'] = \
                    self.status_node_connection_error(node)
                status['ip'] = 'Unknown'
                status['host'] = self.cluster.get_down_hostname(node)
            elif coordinator_down:
                status['error_message'] = '\tNo information available: ' \
                                          'unable to query coordinator'
                status['ip'] = 'Unknown'

        return statuses

    def status_fail_msg(self, actual_output, expected_regexp):
        """Build a failure message that includes a tail of the server log."""
        log_tail = self.fetch_log_tail(lines=100)

        return (
            '=== ACTUAL OUTPUT ===\n%s\n=== DID NOT MATCH REGEXP ===\n%s\n'
            '=== LOG FOR DEBUGGING ===\n%s=== END OF LOG ===' % (
                actual_output, expected_regexp, log_tail))

    def check_status(self, cmd_output, statuses, port=8080):
        """Assert *cmd_output* matches the regex built from *statuses*."""
        expected_output = []
        for status in statuses:
            expected_output += \
                ['Server Status:',
                 '\t%s\(IP: %s, Roles: %s\): %s' %
                 (status['host'], status['ip'], status['role'],
                  status['is_running'])]
            if 'error_message' in status and status['error_message']:
                expected_output += [status['error_message']]
            elif status['is_running'] == 'Running':
                expected_output += \
                    ['\tNode URI\(http\): http://%s:%s' % (status['ip'],
                                                           str(port)),
                     '\tPresto Version: ' + PRESTO_VERSION,
                     '\tNode status:    active',
                     '\tConnectors:     system, tpch']

        expected_regex = '\n'.join(expected_output)
        # The status command is written such that there are a couple ways that
        # the presto client can fail that result in partial output from the
        # command, but errors in the logs. If we fail to match, we include the
        # log information in the assertion message to make determining exactly
        # what failed easier. Grab the logs lazily so that we don't incur the
        # cost of getting them when they aren't needed. The status tests are
        # slow enough already.
        self.assertLazyMessage(
            lambda: self.status_fail_msg(cmd_output, expected_regex),
            self.assertRegexpMatches, cmd_output, expected_regex)

    def _server_status_with_retries(self, check_connectors=False, extra_arguments=''):
        """Run 'server status', retrying until the coordinator has caught up."""
        try:
            return self.retry(lambda: self._get_status_until_coordinator_updated(
                check_connectors, extra_arguments=extra_arguments), 180, 0)
        except PrestoError as e:
            # Pass callables to assertLazyMessage instead of calling them:
            # the previous code invoked self.fail(...) while building the
            # argument list, raising before assertLazyMessage ever ran and
            # always paying for the log fetch in status_fail_msg.
            self.assertLazyMessage(
                lambda: self.status_fail_msg(
                    e.message, "Ran out of time retrying status"),
                self.fail, "PrestoError %s" % (e.message,))

    def _get_status_until_coordinator_updated(self, check_connectors=False, extra_arguments=''):
        """Run 'server status' once; raise PrestoError if output looks stale."""
        status_output = self.run_prestoadmin('server status' + extra_arguments)
        if 'the coordinator has not yet discovered this node' in status_output:
            raise PrestoError('Coordinator has not discovered all nodes yet: '
                              '%s' % status_output)
        if 'Roles: coordinator): Running\n\tNo information available: ' \
           'unable to query coordinator' in status_output:
            raise PrestoError('Coordinator not started up properly yet.'
                              '\nOutput: %s' % status_output)
        if check_connectors and 'Connectors:' not in status_output:
            raise PrestoError('Connectors not loaded yet: %s' % status_output)
        return status_output
class TestServerUninstall(BaseProductTestCase):
    """Product tests for the 'server uninstall' command."""

    def setUp(self):
        super(TestServerUninstall, self).setUp()
        self.installer = StandalonePrestoInstaller(self)

    @attr('smoketest')
    def test_uninstall(self):
        """Uninstall on a running cluster stops it and removes all dirs."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())
        self.assert_started(process_per_host)

        cmd_output = self.run_prestoadmin('server uninstall').splitlines()
        self.assert_stopped(process_per_host)
        expected = uninstall_output + self.expected_stop()[:]
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        for container in self.cluster.all_hosts():
            self.assert_uninstalled_dirs_removed(container)

    def assert_uninstalled_dirs_removed(self, container):
        """Assert every Presto install/config path is gone on *container*."""
        self.installer.assert_uninstalled(container)
        self.assert_path_removed(container, '/etc/presto')
        self.assert_path_removed(container, '/usr/lib/presto')
        self.assert_path_removed(container, '/var/lib/presto')
        self.assert_path_removed(container, '/usr/shared/doc/presto')
        self.assert_path_removed(container, '/etc/init.d/presto')

    def test_uninstall_when_server_down(self):
        """Uninstall succeeds when one node's server is already stopped."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())

        self.run_prestoadmin('server stop -H %s' %
                             self.cluster.internal_slaves[0])
        cmd_output = self.run_prestoadmin('server uninstall').splitlines()
        self.assert_stopped(process_per_host)
        expected = uninstall_output + self.expected_stop(
            not_running=[self.cluster.internal_slaves[0]])[:]
        self.assertRegexpMatchesLineByLine(cmd_output, expected)

        for container in self.cluster.all_hosts():
            self.assert_uninstalled_dirs_removed(container)

    def test_uninstall_twice(self):
        """A second uninstall produces the canned 'already removed' output."""
        self.test_uninstall()

        output = self.run_prestoadmin('server uninstall')
        with open(os.path.join(LOCAL_RESOURCES_DIR, 'uninstall_twice.txt'),
                  'r') as f:
            expected = f.read()

        self.assertEqualIgnoringOrder(expected, output)

    def test_uninstall_lost_host(self):
        """Uninstall proceeds on reachable hosts when one host is down."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        pa_installer = PrestoadminInstaller(self)
        pa_installer.install()
        topology = {
            "coordinator":
            self.cluster.internal_slaves[0],
            "workers": [
                self.cluster.internal_master, self.cluster.internal_slaves[1],
                self.cluster.internal_slaves[2]
            ]
        }
        self.upload_topology(topology)
        self.installer.install()
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())
        self.assert_started(process_per_host)
        down_node = self.cluster.internal_slaves[0]
        self.cluster.stop_host(self.cluster.slaves[0])

        expected = self.down_node_connection_error(
            self.cluster.internal_slaves[0])
        cmd_output = self.run_prestoadmin('server uninstall')
        self.assertRegexpMatches(cmd_output, expected)
        process_per_active_host = []
        for host, pid in process_per_host:
            # Compare hostnames for equality: 'host not in down_node' was a
            # substring test on a string and could wrongly drop a host whose
            # name happens to be a substring of the down node's name.
            if host != down_node:
                process_per_active_host.append((host, pid))
        self.assert_stopped(process_per_active_host)

        for container in [
                self.cluster.internal_master, self.cluster.internal_slaves[1],
                self.cluster.internal_slaves[2]
        ]:
            self.assert_uninstalled_dirs_removed(container)

    def test_uninstall_with_dir_readonly(self):
        """Uninstall skips the node whose install dir is read-only."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        start_output = self.run_prestoadmin('server start')
        process_per_host = self.get_process_per_host(start_output.splitlines())
        self.assert_started(process_per_host)

        self.run_script_from_prestoadmin_dir("chmod 500 -R /usr/lib/presto")
        # Output is intentionally unused; the assertions below verify the
        # resulting state directly.
        self.run_prestoadmin('server uninstall')

        # The master node was not able to be stopped or uninstalled because
        # the permissions of the directory were changed such that the
        # stop command can't run
        pid_to_remove = None
        for (host, pid) in process_per_host:
            if host == self.cluster.internal_master:
                pid_to_remove = pid
        process_per_host.remove((self.cluster.internal_master, pid_to_remove))
        self.assert_stopped(process_per_host)

        uninstalled_hosts = self.cluster.all_hosts()[:]
        uninstalled_hosts.remove(self.cluster.master)

        for container in uninstalled_hosts:
            self.assert_uninstalled_dirs_removed(container)

        self.installer.assert_installed(self, container=self.cluster.master)

    @docker_only
    def test_uninstall_as_non_sudo(self):
        """Uninstall attempted by a non-sudo user fails with canned output."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.upload_topology()
        self.installer.install(dummy=True)

        script = './presto-admin server uninstall -u testuser -p testpass'
        output = self.run_script_from_prestoadmin_dir(script)
        with open(os.path.join(LOCAL_RESOURCES_DIR, 'non_sudo_uninstall.txt'),
                  'r') as f:
            expected = f.read()

        self.assertEqualIgnoringOrder(expected, output)
# --- Esempio n. 51 ("Example no. 51") — separator from the scraped example listing; score: 0 ---
class TestStatus(BaseProductTestCase):
    """Product tests for the 'server status' command (reformatted variant)."""

    def setUp(self):
        super(TestStatus, self).setUp()
        self.installer = StandalonePrestoInstaller(self)

    def test_status_uninstalled(self):
        """Status on a cluster with no Presto reports 'not installed'."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        self.upload_topology()
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_installed_status())

    def test_status_not_started(self):
        """Status on an installed but stopped cluster reports 'not running'."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           STANDALONE_PRESTO_CLUSTER)
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    @attr('smoketest')
    def test_status_happy_path(self):
        """All nodes running: every node reports 'Running' with full details."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           STANDALONE_PRESTO_CLUSTER)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries(check_connectors=True)
        self.check_status(status_output, self.base_status())

    def test_status_only_coordinator(self):
        """Only the coordinator is started; workers report 'not running'."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H master')
        # don't run with retries because it won't be able to query the
        # coordinator because the coordinator is set to not be a worker
        status_output = self.run_prestoadmin('server status')
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_master))

    def test_status_only_worker(self):
        """Only one worker is started; it is the only node reported running."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H slave1')
        status_output = self._server_status_with_retries()
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_slaves[0]))

        # Check that the slave sees that it's stopped, even though the
        # discovery server is not up.
        self.run_prestoadmin('server stop')
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    def test_connection_to_coordinator_lost(self):
        """Coordinator host down: all nodes lose their status information."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        topology = {
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"]
        }
        self.upload_topology(topology=topology)
        self.installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        self.cluster.stop_host(self.cluster.slaves[0])
        topology = {
            "coordinator": self.cluster.get_down_hostname("slave1"),
            "workers": ["master", "slave2", "slave3"]
        }
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[0], coordinator_down=True)
        self.check_status(status_output, statuses)

    def test_connection_to_worker_lost(self):
        """One worker host down: only that node loses status information."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        topology = {
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"]
        }
        self.upload_topology(topology=topology)
        self.installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        self.cluster.stop_host(self.cluster.slaves[1])
        topology = {
            "coordinator":
            "slave1",
            "workers":
            ["master",
             self.cluster.get_down_hostname("slave2"), "slave3"]
        }
        status_output = self._server_status_with_retries(check_connectors=True)
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[1])
        self.check_status(status_output, statuses)

    def test_status_port_not_8080(self):
        """Status works when the HTTP port is changed from the 8080 default."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        self.upload_topology()

        port_config = """discovery.uri=http://master:8090
http-server.http.port=8090"""

        self.installer.install(extra_configs=port_config)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries(check_connectors=True)

        self.check_status(status_output, self.base_status(), 8090)

    def test_status_non_root_user(self):
        """Status works when presto-admin connects as a non-root user."""
        self.setup_cluster(NoHadoopBareImageProvider(),
                           STANDALONE_PRESTO_CLUSTER)
        self.upload_topology({
            "coordinator": "master",
            "workers": ["slave1", "slave2", "slave3"],
            "username": "******"
        })
        self.run_prestoadmin('server start -p password')
        status_output = self._server_status_with_retries(
            check_connectors=True, extra_arguments=' -p password')
        self.check_status(status_output, self.base_status())

    def base_status(self, topology=None):
        """Return the expected per-node status dicts for a healthy cluster."""
        ips = self.cluster.get_ip_address_dict()
        if not topology:
            topology = {
                'coordinator':
                self.cluster.internal_master,
                'workers': [
                    self.cluster.internal_slaves[0],
                    self.cluster.internal_slaves[1],
                    self.cluster.internal_slaves[2]
                ]
            }
        statuses = []
        hosts_in_status = [topology['coordinator']] + topology['workers'][:]
        for host in hosts_in_status:
            # Compare by value: using 'is' here relied on string interning
            # and could mis-classify an equal-but-distinct hostname string.
            role = 'coordinator' if host == topology['coordinator']\
                else 'worker'
            status = {
                'host': host,
                'role': role,
                'ip': ips[host],
                'is_running': 'Running'
            }
            statuses += [status]
        return statuses

    def not_started_status(self):
        """Expected statuses when Presto is installed but not running."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tNo information available: ' \
                                      'unable to query coordinator'
        return statuses

    def not_installed_status(self):
        """Expected statuses when Presto is not installed at all."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tPresto is not installed.'
        return statuses

    def single_node_up_status(self, node):
        """Expected statuses when only *node* is running."""
        statuses = self.not_started_status()
        for status in statuses:
            # Value comparison, not identity: the caller may pass an equal
            # string that is a different object.
            if status['host'] == node:
                status['is_running'] = 'Running'
        return statuses

    def node_not_available_status(self,
                                  topology,
                                  node,
                                  coordinator_down=False):
        """Expected statuses when *node* (optionally the coordinator) is down."""
        statuses = self.base_status(topology)
        for status in statuses:
            if status['host'] == node:
                status['is_running'] = 'Not Running'
                status['error_message'] = \
                    self.status_node_connection_error(node)
                status['ip'] = 'Unknown'
                status['host'] = self.cluster.get_down_hostname(node)
            elif coordinator_down:
                status['error_message'] = '\tNo information available: ' \
                                          'unable to query coordinator'
                status['ip'] = 'Unknown'

        return statuses

    def status_fail_msg(self, actual_output, expected_regexp):
        """Build a failure message that includes a tail of the server log."""
        log_tail = self.fetch_log_tail(lines=100)

        return ('=== ACTUAL OUTPUT ===\n%s\n=== DID NOT MATCH REGEXP ===\n%s\n'
                '=== LOG FOR DEBUGGING ===\n%s=== END OF LOG ===' %
                (actual_output, expected_regexp, log_tail))

    def check_status(self, cmd_output, statuses, port=8080):
        """Assert *cmd_output* matches the regex built from *statuses*."""
        expected_output = []
        for status in statuses:
            expected_output += \
                ['Server Status:',
                 '\t%s\(IP: %s, Roles: %s\): %s' %
                 (status['host'], status['ip'], status['role'],
                  status['is_running'])]
            if 'error_message' in status and status['error_message']:
                expected_output += [status['error_message']]
            elif status['is_running'] == 'Running':
                expected_output += \
                    ['\tNode URI\(http\): http://%s:%s' % (status['ip'],
                                                           str(port)),
                     '\tPresto Version: ' + PRESTO_VERSION,
                     '\tNode status:    active',
                     '\tConnectors:     system, tpch']

        expected_regex = '\n'.join(expected_output)
        # The status command is written such that there are a couple ways that
        # the presto client can fail that result in partial output from the
        # command, but errors in the logs. If we fail to match, we include the
        # log information in the assertion message to make determining exactly
        # what failed easier. Grab the logs lazily so that we don't incur the
        # cost of getting them when they aren't needed. The status tests are
        # slow enough already.
        self.assertLazyMessage(
            lambda: self.status_fail_msg(cmd_output, expected_regex),
            self.assertRegexpMatches, cmd_output, expected_regex)

    def _server_status_with_retries(self,
                                    check_connectors=False,
                                    extra_arguments=''):
        """Run 'server status', retrying until the coordinator has caught up."""
        try:
            return self.retry(
                lambda: self._get_status_until_coordinator_updated(
                    check_connectors, extra_arguments=extra_arguments), 180, 0)
        except PrestoError as e:
            # Pass callables to assertLazyMessage instead of calling them:
            # the previous code invoked self.fail(...) while building the
            # argument list, raising before assertLazyMessage ever ran and
            # always paying for the log fetch in status_fail_msg.
            self.assertLazyMessage(
                lambda: self.status_fail_msg(
                    e.message, "Ran out of time retrying status"),
                self.fail, "PrestoError %s" % (e.message, ))

    def _get_status_until_coordinator_updated(self,
                                              check_connectors=False,
                                              extra_arguments=''):
        """Run 'server status' once; raise PrestoError if output looks stale."""
        status_output = self.run_prestoadmin('server status' + extra_arguments)
        if 'the coordinator has not yet discovered this node' in status_output:
            raise PrestoError('Coordinator has not discovered all nodes yet: '
                              '%s' % status_output)
        if 'Roles: coordinator): Running\n\tNo information available: ' \
           'unable to query coordinator' in status_output:
            raise PrestoError('Coordinator not started up properly yet.'
                              '\nOutput: %s' % status_output)
        if check_connectors and 'Connectors:' not in status_output:
            raise PrestoError('Connectors not loaded yet: %s' % status_output)
        return status_output
# --- Esempio n. 52 ("Example no. 52") — separator from the scraped example listing; score: 0 ---
class TestServerInstall(BaseProductTestCase):
    """Product tests for ``presto-admin server install``.

    Each test uploads (or interactively supplies) a topology, runs the
    installer against a multi-node test cluster, and verifies both the
    command output and the resulting Presto configuration files and
    connectors on every node.
    """

    # Literal expected contents of /etc/presto/config.properties on a
    # worker node when 'slave1' is the coordinator.
    default_workers_config_with_slave1_ = """coordinator=false
discovery.uri=http://slave1:8080
http-server.http.port=8080
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    # Literal expected contents of config.properties on the coordinator
    # node when 'slave1' is the coordinator.
    default_coord_config_with_slave1_ = """coordinator=true
discovery-server.enabled=true
discovery.uri=http://slave1:8080
http-server.http.port=8080
node-scheduler.include-coordinator=false
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    # Regex variants of the two configs above; the discovery.uri host is
    # wildcarded for topologies written with IP addresses.
    default_workers_config_regex_ = """coordinator=false
discovery.uri=http:.*:8080
http-server.http.port=8080
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    default_coord_config_regex_ = """coordinator=true
discovery-server.enabled=true
discovery.uri=http:.*:8080
http-server.http.port=8080
node-scheduler.include-coordinator=false
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    def setUp(self):
        """Bring up a presto-admin-only cluster and create an installer."""
        super(TestServerInstall, self).setUp()
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.installer = StandalonePrestoInstaller(self)

    def assert_common_configs(self, container):
        """Assert the node is installed with the default jvm.config,
        node.properties, and default connector."""
        self.installer.assert_installed(self, container)
        self.assert_file_content(container, '/etc/presto/jvm.config',
                                 self.default_jvm_config_)
        self.assert_node_config(container, self.default_node_properties_)
        self.assert_has_default_connector(container)

    def assert_installed_with_configs(self, master, slaves):
        """Assert coordinator/worker config.properties on each node,
        comparing literally (topology written with host names)."""
        self.assert_common_configs(master)
        self.assert_file_content(master, '/etc/presto/config.properties',
                                 self.default_coord_config_with_slave1_)
        for container in slaves:
            self.assert_common_configs(container)
            self.assert_file_content(container,
                                     '/etc/presto/config.properties',
                                     self.default_workers_config_with_slave1_)

    def assert_installed_with_regex_configs(self, master, slaves):
        """Like assert_installed_with_configs, but matches the
        discovery.uri host by regex (topology written with IPs)."""
        self.assert_common_configs(master)
        self.assert_file_content_regex(master, '/etc/presto/config.properties',
                                       self.default_coord_config_regex_)
        for container in slaves:
            self.assert_common_configs(container)
            self.assert_file_content_regex(container,
                                           '/etc/presto/config.properties',
                                           self.default_workers_config_regex_)

    @attr('smoketest')
    def test_install(self, dummy=False):
        """Plain install with the default uploaded topology; every host
        ends up with the default config and connector."""
        self.upload_topology()

        cmd_output = self.installer.install(dummy)
        expected = installed_all_hosts_output

        actual = cmd_output.splitlines()
        # Per-host output lines arrive in nondeterministic order, so
        # compare sorted.
        self.assertEqual(sorted(expected), sorted(actual))

        for container in self.cluster.all_hosts():
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)

    def test_install_worker_is_pa_master(self):
        """Install where the presto-admin host ('master') is only a
        worker and 'slave1' is the coordinator."""
        topology = {
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"]
        }
        self.upload_topology(topology)

        cmd_output = self.installer.install(dummy=True)
        expected = install_with_worker_pa_master_out

        actual = cmd_output.splitlines()
        self.assertEqual(sorted(expected), sorted(actual))

        self.assert_installed_with_configs(self.cluster.slaves[0], [
            self.cluster.slaves[1], self.cluster.slaves[2], self.cluster.master
        ])

    def test_install_ext_host_is_pa_master(self):
        """Install where the presto-admin host is not part of the
        topology at all."""
        topology = {"coordinator": "slave1", "workers": ["slave2", "slave3"]}
        self.upload_topology(topology)

        cmd_output = self.installer.install(dummy=True)
        expected = install_with_ext_host_pa_master_out

        actual = cmd_output.splitlines()
        self.assertEqual(sorted(expected), sorted(actual))

        self.assert_installed_with_configs(
            self.cluster.slaves[0],
            [self.cluster.slaves[1], self.cluster.slaves[2]])

    def test_install_when_connector_json_exists(self):
        """A pre-existing jmx connector config is deployed alongside the
        default tpch connector during install."""
        topology = {"coordinator": "master", "workers": ["slave1"]}
        self.upload_topology(topology)
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)

        cmd_output = self.installer.install(dummy=True)
        expected = [
            'Deploying rpm on master...', 'Deploying rpm on slave1...',
            'Package deployed successfully on: slave1',
            'Package installed successfully on: slave1',
            'Package deployed successfully on: master',
            'Package installed successfully on: master',
            'Deploying configuration on: master',
            'Deploying jmx.properties, tpch.properties '
            'connector configurations on: master ',
            'Deploying configuration on: slave1',
            'Deploying jmx.properties, tpch.properties '
            'connector configurations on: slave1 '
        ]

        actual = cmd_output.splitlines()
        self.assertEqual(sorted(expected), sorted(actual))

        for container in [self.cluster.master, self.cluster.slaves[0]]:
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
            self.assert_has_jmx_connector(container)

    def test_install_when_topology_has_ips(self):
        """Install with a topology written in IP addresses; output and
        deployed configs are verified by regex."""
        ips = self.cluster.get_ip_address_dict()
        topology = {
            "coordinator": ips[self.cluster.master],
            "workers": [ips[self.cluster.slaves[0]]]
        }
        self.upload_topology(topology)
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)

        cmd_output = self.installer.install(dummy=True).splitlines()
        # Expected entries are regexes, matched line-by-line after both
        # sides are sorted.
        expected = [
            r'Deploying rpm on %s...' % ips[self.cluster.master],
            r'Deploying rpm on %s...' % ips[self.cluster.slaves[0]],
            r'Package deployed successfully on: ' + ips[self.cluster.master],
            r'Package installed successfully on: ' + ips[self.cluster.master],
            r'Package deployed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Package installed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Deploying configuration on: ' + ips[self.cluster.master],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' + ips[self.cluster.master],
            r'Deploying configuration on: ' + ips[self.cluster.slaves[0]],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' + ips[self.cluster.slaves[0]]
        ]

        cmd_output.sort()
        expected.sort()
        self.assertRegexpMatchesLineByLine(expected, cmd_output)

        self.assert_installed_with_regex_configs(self.cluster.master,
                                                 [self.cluster.slaves[0]])
        for container in [self.cluster.master, self.cluster.slaves[0]]:
            self.assert_has_jmx_connector(container)

    def test_install_interactive_with_hostnames(self):
        """Drive the interactive install prompts (user, port,
        coordinator, workers) via echo, using host names."""
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.write_test_configs(self.cluster)

        # echo pipes the prompt answers into presto-admin's stdin.
        cmd_output = self.run_script_from_prestoadmin_dir(
            'echo -e "root\n22\n%(master)s\n%(slave1)s\n" | '
            './presto-admin server install /mnt/presto-admin/%(rpm)s ',
            rpm=rpm_name)

        actual = cmd_output.splitlines()
        expected = [
            r'Enter user name for SSH connection to all nodes: '
            r'\[root\] '
            r'Enter port number for SSH connections to all nodes: '
            r'\[22\] '
            r'Enter host name or IP address for coordinator node.  '
            r'Enter an external host name or ip address if this is a '
            r'multi-node cluster: \[localhost\] '
            r'Enter host names or IP addresses for worker nodes '
            r'separated by spaces: '
            r'\[localhost\] Deploying rpm on .*\.\.\.',
            r'Package deployed successfully on: ' +
            self.cluster.internal_master,
            r'Package installed successfully on: ' +
            self.cluster.internal_master,
            r'Package deployed successfully on: ' +
            self.cluster.internal_slaves[0],
            r'Package installed successfully on: ' +
            self.cluster.internal_slaves[0],
            r'Deploying configuration on: ' + self.cluster.internal_master,
            r'Deploying jmx.properties, tpch.properties connector '
            r'configurations on: ' + self.cluster.internal_master,
            r'Deploying configuration on: ' + self.cluster.internal_slaves[0],
            r'Deploying jmx.properties, tpch.properties connector '
            r'configurations on: ' + self.cluster.internal_slaves[0],
            r'Deploying rpm on .*\.\.\.'
        ]

        self.assertRegexpMatchesLineByLine(actual, expected)
        for container in [self.cluster.master, self.cluster.slaves[0]]:
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
            self.assert_has_jmx_connector(container)

    def test_install_interactive_with_ips(self):
        """Drive the interactive install prompts using IP addresses."""
        ips = self.cluster.get_ip_address_dict()
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.write_test_configs(self.cluster)

        additional_keywords = {
            'rpm': rpm_name,
            'master_ip': ips[self.cluster.master],
            'slave1_ip': ips[self.cluster.slaves[0]]
        }
        cmd_output = self.run_script_from_prestoadmin_dir(
            'echo -e "root\n22\n%(master_ip)s\n%(slave1_ip)s\n" | '
            './presto-admin server install /mnt/presto-admin/%(rpm)s ',
            **additional_keywords).splitlines()
        expected = [
            r'Enter user name for SSH connection to all nodes: '
            r'\[root\] '
            r'Enter port number for SSH connections to all nodes: '
            r'\[22\] '
            r'Enter host name or IP address for coordinator node.  '
            r'Enter an external host name or ip address if this is a '
            r'multi-node cluster: \[localhost\] '
            r'Enter host names or IP addresses for worker nodes '
            r'separated by spaces: '
            r'\[localhost\] Deploying rpm on .*\.\.\.',
            r'Package deployed successfully on: ' + ips[self.cluster.master],
            r'Package installed successfully on: ' + ips[self.cluster.master],
            r'Package deployed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Package installed successfully on: ' +
            ips[self.cluster.slaves[0]], r'Deploying configuration on: ' +
            ips[self.cluster.master], r'Deploying tpch.properties connector '
            r'configurations on: ' + ips[self.cluster.master],
            r'Deploying configuration on: ' + ips[self.cluster.slaves[0]],
            r'Deploying tpch.properties connector '
            r'configurations on: ' + ips[self.cluster.slaves[0]],
            r'Deploying rpm on .*\.\.\.'
        ]

        # Sort both sides, then regex-match pairwise.
        cmd_output.sort()
        expected.sort()
        for expected_regexp, actual_line in zip(expected, cmd_output):
            self.assertRegexpMatches(actual_line, expected_regexp)

        self.assert_installed_with_regex_configs(self.cluster.master,
                                                 [self.cluster.slaves[0]])

    def test_install_with_wrong_topology(self):
        """Install with an unresolvable coordinator host name fails with
        a validation error."""
        rpm_name = self.installer.copy_presto_rpm_to_master()
        topology = {'coordinator': 'dummy_master', 'workers': ['slave1']}
        self.upload_topology(topology)
        # NOTE(review): the u'...' prefix in the expected message is the
        # Python 2 unicode repr emitted by presto-admin.
        expected = 'u\'dummy_master\' is not a valid ip address or' \
                   ' host name.' \
                   '  More detailed information can be found in ' \
                   '/var/log/prestoadmin/presto-admin.log\n'
        self.assertRaisesRegexp(OSError,
                                expected,
                                self.run_prestoadmin,
                                'server install /mnt/presto-admin/%(rpm)s ',
                                rpm=rpm_name)

    def test_install_with_malformed_topology(self):
        """Install with workers given as a string (not a list) fails
        with a type error message."""
        rpm_name = self.installer.copy_presto_rpm_to_master()
        # Deliberate: implicit string concatenation yields the single
        # string 'slave1slave2', i.e. a malformed (non-list) workers
        # value.
        topology = {'coordinator': 'master', 'workers': 'slave1' 'slave2'}
        self.upload_topology(topology)
        expected = 'Workers must be of type list.  Found <type \'unicode\'>.' \
                   '  More detailed information can be found in ' \
                   '/var/log/prestoadmin/presto-admin.log'

        self.assertRaisesRegexp(OSError,
                                expected,
                                self.run_prestoadmin,
                                'server install /mnt/presto-admin/%(rpm)s ',
                                rpm=rpm_name)

    def test_install_with_malformed_connector(self):
        """A connector config missing connector.name is reported, but
        the server install itself still completes."""
        self.upload_topology()
        self.cluster.write_content_to_host(
            'connectr.typo:invalid',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master)
        actual_out = self.installer.install(dummy=True)
        expected = 'Underlying exception:\n    Catalog configuration ' \
                   'jmx.properties does not contain connector.name'
        self.assertRegexpMatches(actual_out, expected)

        for container in self.cluster.all_hosts():
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)

    def test_connection_to_coord_lost(self):
        """Install when the coordinator host is down: the error is
        reported and the reachable workers are still configured."""
        down_node = self.cluster.internal_slaves[0]
        topology = {
            "coordinator":
            down_node,
            "workers": [
                self.cluster.internal_master, self.cluster.internal_slaves[1],
                self.cluster.internal_slaves[2]
            ]
        }
        self.upload_topology(topology=topology)
        self.cluster.stop_host(self.cluster.slaves[0])

        actual_out = self.installer.install(dummy=True)
        self.assertRegexpMatches(actual_out,
                                 self.down_node_connection_error(down_node))

        for container in [
                self.cluster.master, self.cluster.slaves[1],
                self.cluster.slaves[2]
        ]:
            self.assert_common_configs(container)
            # Expected config substitutes the down node's "down"
            # hostname for its normal one.
            self.assert_file_content(
                container, '/etc/presto/config.properties',
                self.default_workers_config_with_slave1_.replace(
                    down_node, self.cluster.get_down_hostname(down_node)))

    @docker_only
    def test_install_with_no_perm_to_local_path(self):
        """Install as an unprivileged user against an unreadable rpm
        fails with a permission-denied fatal error per host."""
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.upload_topology()
        self.run_prestoadmin("configuration deploy")

        # chmod 600 makes the rpm unreadable to app-admin before the
        # install is attempted as that user.
        script = 'chmod 600 /mnt/presto-admin/%(rpm)s; su app-admin -c ' \
                 '"./presto-admin server install /mnt/presto-admin/%(rpm)s "'
        error_msg = '\nFatal error: [%(host)s] error: ' \
                    '/mnt/presto-admin/%(rpm)s: ' \
                    'open failed: Permission denied\n\nAborting.\n'
        expected = ''
        for host in self.cluster.all_internal_hosts():
            expected += error_msg % {'host': host, 'rpm': rpm_name}
        actual = self.run_script_from_prestoadmin_dir(script, rpm=rpm_name)
        self.assertEqualIgnoringOrder(actual, expected)

    def test_install_twice(self):
        """Installing over an existing install produces the output in
        install_twice.txt and leaves the cluster configured."""
        self.test_install(dummy=True)
        output = self.installer.install(dummy=True)

        with open(os.path.join(LOCAL_RESOURCES_DIR, 'install_twice.txt'), 'r') \
                as f:
            expected = f.read()
        # Resource file contains placeholders; substitute them, then
        # escape so it can be used as a line-by-line regex.
        expected = self.escape_for_regex(self.replace_keywords(expected))

        self.assertRegexpMatchesLineByLine(output.splitlines(),
                                           expected.splitlines())
        for container in self.cluster.all_hosts():
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
# Esempio n. 53 (0) -- extraction artifact: example-aggregator separator
class TestStatus(BaseProductTestCase):
    """Product tests for ``presto-admin server status``.

    Builds the expected per-node status dicts, renders them into the
    regex the ``server status`` output must match, and checks a variety
    of cluster states: uninstalled, not started, fully up, single node
    up, coordinator/worker down, and a non-default port.

    Fix over the original: string comparisons used ``is`` (identity)
    where ``==`` (equality) is meant; that only worked by CPython
    interning/object-reuse accident.
    """

    def setUp(self):
        """Create the installer; each test sets up its own cluster."""
        super(TestStatus, self).setUp()
        self.installer = StandalonePrestoInstaller(self)

    def test_status_uninstalled(self):
        """Status before install reports 'Presto is not installed.'"""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.upload_topology()
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_installed_status())

    def test_status_not_started(self):
        """Status after install but before start reports Not Running."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    @attr('smoketest')
    def test_status_happy_path(self):
        """Status with all nodes started shows every node Running."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.base_status())

    def test_status_only_coordinator(self):
        """Only the coordinator started: query status once, no retries
        (the coordinator is configured not to be a worker, so polling
        for worker discovery would never converge)."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H master')
        # don't run with retries because it won't be able to query the
        # coordinator because the coordinator is set to not be a worker
        status_output = self.run_prestoadmin('server status')
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_master))

    def test_status_only_worker(self):
        """Only one worker started, then everything stopped again."""
        self.setup_cluster(self.STANDALONE_PRESTO_CLUSTER)

        self.run_prestoadmin('server start -H slave1')
        status_output = self._server_status_with_retries()
        self.check_status(
            status_output,
            self.single_node_up_status(self.cluster.internal_slaves[0]))

        # Check that the slave sees that it's stopped, even though the
        # discovery server is not up.
        self.run_prestoadmin('server stop')
        status_output = self._server_status_with_retries()
        self.check_status(status_output, self.not_started_status())

    def test_connection_to_coordinator_lost(self):
        """Coordinator host goes down after start: its status shows the
        connection error and the rest can't query the coordinator."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        topology = {
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"]
        }
        self.upload_topology(topology=topology)
        self.installer.install()
        self.run_prestoadmin('server start')
        self.cluster.stop_host(self.cluster.slaves[0])
        topology = {
            "coordinator": self.cluster.get_down_hostname("slave1"),
            "workers": ["master", "slave2", "slave3"]
        }
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[0], coordinator_down=True)
        self.check_status(status_output, statuses)

    def test_connection_to_worker_lost(self):
        """A worker host goes down after start: only that node shows the
        connection error."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        topology = {
            "coordinator": "slave1",
            "workers": ["master", "slave2", "slave3"]
        }
        self.upload_topology(topology=topology)
        self.installer.install()
        self.run_prestoadmin('server start')
        self.cluster.stop_host(self.cluster.slaves[1])
        topology = {
            "coordinator":
            "slave1",
            "workers":
            ["master",
             self.cluster.get_down_hostname("slave2"), "slave3"]
        }
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[1])
        self.check_status(status_output, statuses)

    def test_status_port_not_8080(self):
        """Status reports the node URI with a non-default HTTP port."""
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.upload_topology()

        port_config = """discovery.uri=http://master:8090
http-server.http.port=8090"""

        self.installer.install(extra_configs=port_config)
        self.run_prestoadmin('server start')
        status_output = self._server_status_with_retries()

        self.check_status(status_output, self.base_status(), 8090)

    def base_status(self, topology=None):
        """Return the expected per-node status dicts for a fully
        running cluster.

        ``topology`` defaults to the standard one-coordinator,
        three-worker layout of the test cluster.
        """
        ips = self.cluster.get_ip_address_dict()
        if not topology:
            topology = {
                'coordinator':
                self.cluster.internal_master,
                'workers': [
                    self.cluster.internal_slaves[0],
                    self.cluster.internal_slaves[1],
                    self.cluster.internal_slaves[2]
                ]
            }
        statuses = []
        hosts_in_status = [topology['coordinator']] + topology['workers'][:]
        for host in hosts_in_status:
            # '==' (was 'is'): compare host names by value, not object
            # identity.
            role = 'coordinator' if host == topology['coordinator']\
                else 'worker'
            status = {
                'host': host,
                'role': role,
                'ip': ips[host],
                'is_running': 'Running'
            }
            statuses += [status]
        return statuses

    def not_started_status(self):
        """Expected statuses when Presto is installed but stopped."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tNo information available: ' \
                                      'unable to query coordinator'
        return statuses

    def not_installed_status(self):
        """Expected statuses when Presto is not installed at all."""
        statuses = self.base_status()
        for status in statuses:
            status['ip'] = 'Unknown'
            status['is_running'] = 'Not Running'
            status['error_message'] = '\tPresto is not installed.'
        return statuses

    def single_node_up_status(self, node):
        """Expected statuses when only ``node`` is running."""
        statuses = self.not_started_status()
        for status in statuses:
            # '==' (was 'is'): string identity is an interpreter
            # implementation detail.
            if status['host'] == node:
                status['is_running'] = 'Running'
        return statuses

    def node_not_available_status(self,
                                  topology,
                                  node,
                                  coordinator_down=False):
        """Expected statuses when ``node`` is unreachable; with
        ``coordinator_down``, the other nodes also lose coordinator
        information."""
        statuses = self.base_status(topology)
        for status in statuses:
            if status['host'] == node:
                status['is_running'] = 'Not Running'
                status['error_message'] = \
                    self.status_node_connection_error(node)
                status['ip'] = 'Unknown'
                status['host'] = self.cluster.get_down_hostname(node)
            elif coordinator_down:
                status['error_message'] = '\tNo information available: ' \
                                          'unable to query coordinator'
                status['ip'] = 'Unknown'

        return statuses

    def check_status(self, cmd_output, statuses, port=8080):
        """Assert that ``cmd_output`` matches the regex rendered from
        the expected ``statuses`` (node URI uses ``port``)."""
        expected_output = []
        for status in statuses:
            expected_output += \
                ['Server Status:',
                 '\t%s\(IP: %s, Roles: %s\): %s' %
                 (status['host'], status['ip'], status['role'],
                  status['is_running'])]
            if 'error_message' in status and status['error_message']:
                expected_output += [status['error_message']]
            # '==' (was 'is'): value comparison for the status string.
            elif status['is_running'] == 'Running':
                expected_output += \
                    ['\tNode URI\(http\): http://%s:%s' % (status['ip'],
                                                           str(port)),
                     '\tPresto Version: ' + PRESTO_VERSION,
                     '\tNode is active: True',
                     '\tConnectors:     system, tpch']

        self.assertRegexpMatches(cmd_output, '\n'.join(expected_output))

    def _server_status_with_retries(self):
        # NOTE(review): relies on self.retry's default timeout/interval
        # -- confirm against BaseProductTestCase.retry.
        return self.retry(lambda: self._get_status_until_coordinator_updated())

    def _get_status_until_coordinator_updated(self):
        """Run ``server status`` once; raise PrestoError while the
        coordinator is still coming up, so retry() keeps polling."""
        status_output = self.run_prestoadmin('server status')
        if 'the coordinator has not yet discovered this node' in status_output:
            raise PrestoError('Coordinator has not discovered all nodes yet: '
                              '%s' % status_output)
        if 'Roles: coordinator): Running\n\tNo information available: ' \
           'unable to query coordinator' in status_output:
            raise PrestoError('Coordinator not started up properly yet.'
                              '\nOutput: %s' % status_output)
        return status_output
# Esempio n. 54 (0) -- extraction artifact: example-aggregator separator
class TestServerInstall(BaseProductTestCase):
    default_workers_config_with_slave1_ = """coordinator=false
discovery.uri=http://slave1:8080
http-server.http.port=8080
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    default_coord_config_with_slave1_ = """coordinator=true
discovery-server.enabled=true
discovery.uri=http://slave1:8080
http-server.http.port=8080
node-scheduler.include-coordinator=false
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    default_workers_config_regex_ = """coordinator=false
discovery.uri=http:.*:8080
http-server.http.port=8080
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    default_coord_config_regex_ = """coordinator=true
discovery-server.enabled=true
discovery.uri=http:.*:8080
http-server.http.port=8080
node-scheduler.include-coordinator=false
query.max-memory-per-node=512MB
query.max-memory=50GB\n"""

    def setUp(self):
        """Bring up a presto-admin-only cluster and create an installer."""
        super(TestServerInstall, self).setUp()
        self.setup_cluster(self.PA_ONLY_CLUSTER)
        self.installer = StandalonePrestoInstaller(self)

    def assert_common_configs(self, container):
        """Assert the node is installed with the default jvm.config,
        node.properties, and default connector."""
        self.installer.assert_installed(self, container)
        self.assert_file_content(container, '/etc/presto/jvm.config',
                                 self.default_jvm_config_)
        self.assert_node_config(container, self.default_node_properties_)
        self.assert_has_default_connector(container)

    def assert_installed_with_configs(self, master, slaves):
        """Assert coordinator/worker config.properties on each node,
        comparing literally (topology written with host names)."""
        self.assert_common_configs(master)
        self.assert_file_content(master,
                                 '/etc/presto/config.properties',
                                 self.default_coord_config_with_slave1_)
        for container in slaves:
            self.assert_common_configs(container)
            self.assert_file_content(container,
                                     '/etc/presto/config.properties',
                                     self.default_workers_config_with_slave1_)

    def assert_installed_with_regex_configs(self, master, slaves):
        """Like assert_installed_with_configs, but matches the
        discovery.uri host by regex (topology written with IPs)."""
        self.assert_common_configs(master)
        self.assert_file_content_regex(master,
                                       '/etc/presto/config.properties',
                                       self.default_coord_config_regex_)
        for container in slaves:
            self.assert_common_configs(container)
            self.assert_file_content_regex(container,
                                           '/etc/presto/config.properties',
                                           self.default_workers_config_regex_)

    @attr('smoketest')
    def test_install(self, dummy=False):
        """Plain install with the default uploaded topology; every host
        ends up with the default config and connector."""
        self.upload_topology()

        cmd_output = self.installer.install(dummy)
        expected = installed_all_hosts_output

        actual = cmd_output.splitlines()
        # Per-host output lines arrive in nondeterministic order, so
        # compare sorted.
        self.assertEqual(sorted(expected), sorted(actual))

        for container in self.cluster.all_hosts():
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)

    def test_install_worker_is_pa_master(self):
        """Install where the presto-admin host ('master') is only a
        worker and 'slave1' is the coordinator."""
        topology = {"coordinator": "slave1",
                    "workers": ["master", "slave2", "slave3"]}
        self.upload_topology(topology)

        cmd_output = self.installer.install(dummy=True)
        expected = install_with_worker_pa_master_out

        actual = cmd_output.splitlines()
        self.assertEqual(sorted(expected), sorted(actual))

        self.assert_installed_with_configs(
            self.cluster.slaves[0],
            [self.cluster.slaves[1],
             self.cluster.slaves[2],
             self.cluster.master])

    def test_install_ext_host_is_pa_master(self):
        """Install where the presto-admin host is not part of the
        topology at all."""
        topology = {"coordinator": "slave1",
                    "workers": ["slave2", "slave3"]}
        self.upload_topology(topology)

        cmd_output = self.installer.install(dummy=True)
        expected = install_with_ext_host_pa_master_out

        actual = cmd_output.splitlines()
        self.assertEqual(sorted(expected), sorted(actual))

        self.assert_installed_with_configs(
            self.cluster.slaves[0],
            [self.cluster.slaves[1],
             self.cluster.slaves[2]])

    def test_install_when_connector_json_exists(self):
        """A pre-existing jmx connector config is deployed alongside the
        default tpch connector during install."""
        topology = {"coordinator": "master",
                    "workers": ["slave1"]}
        self.upload_topology(topology)
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )

        cmd_output = self.installer.install(dummy=True)
        expected = ['Deploying rpm on master...',
                    'Deploying rpm on slave1...',
                    'Package deployed successfully on: slave1',
                    'Package installed successfully on: slave1',
                    'Package deployed successfully on: master',
                    'Package installed successfully on: master',
                    'Deploying configuration on: master',
                    'Deploying jmx.properties, tpch.properties '
                    'connector configurations on: master ',
                    'Deploying configuration on: slave1',
                    'Deploying jmx.properties, tpch.properties '
                    'connector configurations on: slave1 ']

        actual = cmd_output.splitlines()
        # Per-host output order is nondeterministic; compare sorted.
        self.assertEqual(sorted(expected), sorted(actual))

        for container in [self.cluster.master,
                          self.cluster.slaves[0]]:
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)
            self.assert_has_jmx_connector(container)

    def test_install_when_topology_has_ips(self):
        """Install with a topology written in IP addresses; output and
        deployed configs are verified by regex."""
        ips = self.cluster.get_ip_address_dict()
        topology = {"coordinator": ips[self.cluster.master],
                    "workers": [ips[self.cluster.slaves[0]]]}
        self.upload_topology(topology)
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )

        cmd_output = self.installer.install(dummy=True).splitlines()
        # Expected entries are regexes, matched line-by-line after both
        # sides are sorted.
        expected = [
            r'Deploying rpm on %s...' % ips[self.cluster.master],
            r'Deploying rpm on %s...' % ips[self.cluster.slaves[0]],
            r'Package deployed successfully on: ' + ips[
                self.cluster.master],
            r'Package installed successfully on: ' + ips[
                self.cluster.master],
            r'Package deployed successfully on: '
            + ips[self.cluster.slaves[0]],
            r'Package installed successfully on: ' +
            ips[self.cluster.slaves[0]],
            r'Deploying configuration on: ' +
            ips[self.cluster.master],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' +
            ips[self.cluster.master],
            r'Deploying configuration on: ' +
            ips[self.cluster.slaves[0]],
            r'Deploying jmx.properties, tpch.properties '
            r'connector configurations on: ' +
            ips[self.cluster.slaves[0]]]

        cmd_output.sort()
        expected.sort()
        self.assertRegexpMatchesLineByLine(expected, cmd_output)

        self.assert_installed_with_regex_configs(
            self.cluster.master,
            [self.cluster.slaves[0]])
        for container in [self.cluster.master,
                          self.cluster.slaves[0]]:
            self.assert_has_jmx_connector(container)

    def test_install_interactive_with_hostnames(self):
        """Drive ``server install`` through its interactive prompts using
        host names, then verify the reported output and the resulting
        installation on the coordinator and worker.
        """
        # Pre-stage a jmx connector config so the install deploys it too.
        self.cluster.write_content_to_host(
            'connector.name=jmx',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.write_test_configs(self.cluster)

        # Feed the four interactive answers (user, port, coordinator,
        # workers) through stdin.
        cmd_output = self.run_script_from_prestoadmin_dir(
            'echo -e "root\n22\n%(master)s\n%(slave1)s\n" | '
            './presto-admin server install /mnt/presto-admin/%(rpm)s ',
            rpm=rpm_name)

        master = self.cluster.internal_master
        slave = self.cluster.internal_slaves[0]
        # First output line bundles every prompt plus the initial
        # "Deploying rpm" message.
        expected = [r'Enter user name for SSH connection to all nodes: '
                    r'\[root\] '
                    r'Enter port number for SSH connections to all nodes: '
                    r'\[22\] '
                    r'Enter host name or IP address for coordinator node.  '
                    r'Enter an external host name or ip address if this is a '
                    r'multi-node cluster: \[localhost\] '
                    r'Enter host names or IP addresses for worker nodes '
                    r'separated by spaces: '
                    r'\[localhost\] Deploying rpm on .*\.\.\.']
        for host in (master, slave):
            expected.append(r'Package deployed successfully on: ' + host)
            expected.append(r'Package installed successfully on: ' + host)
        for host in (master, slave):
            expected.append(r'Deploying configuration on: ' + host)
            expected.append(
                r'Deploying jmx.properties, tpch.properties connector '
                r'configurations on: ' + host)
        expected.append(r'Deploying rpm on .*\.\.\.')

        self.assertRegexpMatchesLineByLine(cmd_output.splitlines(), expected)
        for node in [self.cluster.master, self.cluster.slaves[0]]:
            self.installer.assert_installed(self, node)
            self.assert_has_default_config(node)
            self.assert_has_default_connector(node)
            self.assert_has_jmx_connector(node)
    def test_install_interactive_with_ips(self):
        """Drive ``server install`` through its interactive prompts using
        IP addresses, then verify the reported output.

        Fix: the comparison loop used ``zip`` alone, which silently
        truncates to the shorter sequence, so a missing (or extra) output
        line could never fail the test; an explicit length check now
        precedes the per-line regex matching.
        """
        ips = self.cluster.get_ip_address_dict()
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.write_test_configs(self.cluster)

        additional_keywords = {
            'rpm': rpm_name,
            'master_ip': ips[self.cluster.master],
            'slave1_ip': ips[self.cluster.slaves[0]]
        }
        # Feed the four interactive answers (user, port, coordinator,
        # workers) through stdin.
        cmd_output = self.run_script_from_prestoadmin_dir(
            'echo -e "root\n22\n%(master_ip)s\n%(slave1_ip)s\n" | '
            './presto-admin server install /mnt/presto-admin/%(rpm)s ',
            **additional_keywords).splitlines()
        expected = [r'Enter user name for SSH connection to all nodes: '
                    r'\[root\] '
                    r'Enter port number for SSH connections to all nodes: '
                    r'\[22\] '
                    r'Enter host name or IP address for coordinator node.  '
                    r'Enter an external host name or ip address if this is a '
                    r'multi-node cluster: \[localhost\] '
                    r'Enter host names or IP addresses for worker nodes '
                    r'separated by spaces: '
                    r'\[localhost\] Deploying rpm on .*\.\.\.',
                    r'Package deployed successfully on: ' +
                    ips[self.cluster.master],
                    r'Package installed successfully on: ' +
                    ips[self.cluster.master],
                    r'Package deployed successfully on: '
                    + ips[self.cluster.slaves[0]],
                    r'Package installed successfully on: '
                    + ips[self.cluster.slaves[0]],
                    r'Deploying configuration on: ' +
                    ips[self.cluster.master],
                    r'Deploying tpch.properties connector '
                    r'configurations on: ' +
                    ips[self.cluster.master],
                    r'Deploying configuration on: ' +
                    ips[self.cluster.slaves[0]],
                    r'Deploying tpch.properties connector '
                    r'configurations on: ' +
                    ips[self.cluster.slaves[0]],
                    r'Deploying rpm on .*\.\.\.']

        # Sort both sides so the comparison is order-insensitive.
        cmd_output.sort()
        expected.sort()
        # BUG FIX: zip() stops at the shorter sequence; without this check
        # a missing or extra output line would pass unnoticed.
        self.assertEqual(len(expected), len(cmd_output))
        for expected_regexp, actual_line in zip(expected, cmd_output):
            self.assertRegexpMatches(actual_line, expected_regexp)

        self.assert_installed_with_regex_configs(
            self.cluster.master,
            [self.cluster.slaves[0]])
    def test_install_with_wrong_topology(self):
        """A coordinator entry that is not a resolvable host name or IP
        must make ``server install`` abort with a descriptive error."""
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.upload_topology({'coordinator': 'dummy_master',
                              'workers': ['slave1']})
        expected = ('u\'dummy_master\' is not a valid ip address or'
                    ' host name.'
                    '  More detailed information can be found in '
                    '/var/log/prestoadmin/presto-admin.log\n')
        self.assertRaisesRegexp(OSError,
                                expected,
                                self.run_prestoadmin,
                                'server install /mnt/presto-admin/%(rpm)s ',
                                rpm=rpm_name)

    def test_install_with_malformed_topology(self):
        """Workers given as a single string (rather than a list) must make
        ``server install`` fail with a type-error message."""
        rpm_name = self.installer.copy_presto_rpm_to_master()
        # 'slave1slave2' was originally written as the implicit literal
        # concatenation 'slave1' 'slave2'; the value itself is irrelevant —
        # the point is that workers is a string, not a list.
        topology = {'coordinator': 'master',
                    'workers': 'slave1slave2'}
        self.upload_topology(topology)
        expected = ('Workers must be of type list.  Found <type \'unicode\'>.'
                    '  More detailed information can be found in '
                    '/var/log/prestoadmin/presto-admin.log')

        self.assertRaisesRegexp(OSError,
                                expected,
                                self.run_prestoadmin,
                                'server install /mnt/presto-admin/%(rpm)s ',
                                rpm=rpm_name)

    def test_install_with_malformed_connector(self):
        """A connector properties file missing ``connector.name`` should be
        reported, while the install itself still completes with the
        default configuration on every host."""
        self.upload_topology()
        # Stage a jmx.properties that lacks the mandatory connector.name.
        self.cluster.write_content_to_host(
            'connectr.typo:invalid',
            os.path.join(constants.CONNECTORS_DIR, 'jmx.properties'),
            self.cluster.master
        )
        install_output = self.installer.install(dummy=True)
        self.assertRegexpMatches(
            install_output,
            'Underlying exception:\n    Catalog configuration '
            'jmx.properties does not contain connector.name')

        for host in self.cluster.all_hosts():
            self.installer.assert_installed(self, host)
            self.assert_has_default_config(host)

    def test_connection_to_coord_lost(self):
        """When the designated coordinator is unreachable, the install
        should report the connection error but still configure the
        remaining (worker) nodes."""
        down_node = self.cluster.internal_slaves[0]
        self.upload_topology(topology={
            "coordinator": down_node,
            "workers": [self.cluster.internal_master,
                        self.cluster.internal_slaves[1],
                        self.cluster.internal_slaves[2]]})
        self.cluster.stop_host(self.cluster.slaves[0])

        install_output = self.installer.install(dummy=True)
        self.assertRegexpMatches(
            install_output,
            self.down_node_connection_error(down_node)
        )

        # The expected config is the same for every surviving node, so
        # compute it once instead of per iteration.
        expected_config = self.default_workers_config_with_slave1_.replace(
            down_node, self.cluster.get_down_hostname(down_node))
        for host in [self.cluster.master,
                     self.cluster.slaves[1],
                     self.cluster.slaves[2]]:
            self.assert_common_configs(host)
            self.assert_file_content(host,
                                     '/etc/presto/config.properties',
                                     expected_config)

    @docker_only
    def test_install_with_no_perm_to_local_path(self):
        """Running the install as a user without read permission on the
        RPM must fail with a permission-denied error for every host."""
        rpm_name = self.installer.copy_presto_rpm_to_master()
        self.upload_topology()
        self.run_prestoadmin("configuration deploy")

        # Make the RPM unreadable to app-admin, then try installing as
        # that user.
        script = 'chmod 600 /mnt/presto-admin/%(rpm)s; su app-admin -c ' \
                 '"./presto-admin server install /mnt/presto-admin/%(rpm)s "'
        error_msg = '\nFatal error: [%(host)s] error: ' \
                    '/mnt/presto-admin/%(rpm)s: ' \
                    'open failed: Permission denied\n\nAborting.\n'
        expected = ''.join(
            error_msg % {'host': host, 'rpm': rpm_name}
            for host in self.cluster.all_internal_hosts())
        actual = self.run_script_from_prestoadmin_dir(script, rpm=rpm_name)
        self.assertEqualIgnoringOrder(actual, expected)

    def test_install_twice(self):
        """Re-run ``server install`` on an already-installed cluster and
        check that the second run's output matches the expected
        already-installed messages without breaking the installation."""
        self.test_install(dummy=True)
        output = self.installer.install(dummy=True)

        with open(os.path.join(LOCAL_RESOURCES_DIR, 'install_twice.txt'), 'r') \
                as f:
            expected = f.read()
        # The resource file contains placeholder keywords; substitute the
        # cluster-specific values, then escape the result so it can be used
        # as line-by-line regexes.
        expected = self.escape_for_regex(
            self.replace_keywords(expected))

        self.assertRegexpMatchesLineByLine(output.splitlines(),
                                           expected.splitlines())
        for container in self.cluster.all_hosts():
            self.installer.assert_installed(self, container)
            self.assert_has_default_config(container)
            self.assert_has_default_connector(container)