Example #1
 def check_ssh_connection():
     """Try to close fuelmenu and check ssh connection"""
     try:
         _tcp_ping(
             self.d_env.nodes().admin.get_ip_address_by_network_name(
                 self.d_env.admin_net), 22)
     except Exception:
         # Send F8 to try to exit fuelmenu
         self.d_env.nodes().admin.send_keys("<F8>\n")
         return False
     return True
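All of these snippets rely on two helpers, _tcp_ping and _wait, whose definitions are not shown in the excerpts. Below is a minimal sketch of the behaviour the callers appear to assume; it is a hypothetical stand-in, not the actual fuel-devops implementation.

import socket
import time


def _tcp_ping(host, port):
    # Open and immediately close a TCP connection; raises on failure.
    socket.create_connection((host, port), timeout=5).close()
    return True


def _wait(predicate, interval=5, timeout=60):
    # Re-run predicate() until it returns without raising, or re-raise the
    # last exception once the timeout expires.
    finish = time.time() + timeout
    while True:
        try:
            return predicate()
        except Exception:
            if time.time() > finish:
                raise
            time.sleep(interval)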
Example #2
 def check_ssh_connection():
     """Try to close fuelmenu and check ssh connection"""
     try:
         _tcp_ping(
             self.d_env.nodes(
             ).admin.get_ip_address_by_network_name
             (self.d_env.admin_net), 22)
     except Exception:
         # Send F8 to try to exit fuelmenu
         self.d_env.nodes().admin.send_keys("<F8>\n")
         return False
     return True
Example #3
 def wait_for_provisioning(self,
                           timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
     _wait(lambda: _tcp_ping(
         self.d_env.nodes(
         ).admin.get_ip_address_by_network_name
         (self.d_env.admin_net), 22), timeout=timeout)
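Example #3 takes the timeout from settings.WAIT_FOR_PROVISIONING_TIMEOUT instead of hard-coding it, as Example #4 below does. A hypothetical sketch of how such a setting might be declared; the environment lookup and the 30-minute default are assumptions, only the setting name comes from the example.

# settings.py (sketch): make the provisioning timeout overridable from the
# environment, with a fixed fallback.
import os

WAIT_FOR_PROVISIONING_TIMEOUT = int(
    os.environ.get('WAIT_FOR_PROVISIONING_TIMEOUT', 30 * 60))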
Example #4
 def wait_for_provisioning(self):
     _wait(lambda: _tcp_ping(
         self.nodes().admin.get_ip_address_by_network_name
         (self.admin_net), 22), timeout=5 * 60)
Example #5
    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        for nailgun_node in self.env.d_env.nodes().slaves[0:3]:
            with self.fuel_web.get_ssh_for_node(nailgun_node.name) as remote:
                assert_true('True' in check_available_mode(remote),
                            "Maintenance mode is not available")

                logger.info('Change UMM.CONF on node %s', nailgun_node.name)
                command1 = ("echo -e 'UMM=yes\nREBOOT_COUNT=0\n"
                            "COUNTER_RESET_TIME=10' > /etc/umm.conf")

                result = remote.execute(command1)
                assert_equal(
                    result['exit_code'], 0,
                    'Failed to execute "{0}" on remote host: {1}'.format(
                        command1, result))

                result = remote.execute('umm disable')
                assert_equal(
                    result['exit_code'], 0,
                    'Failed to execute "{0}" on remote host: {1}'.format(
                        'umm disable', result))

                assert_false('True' in check_available_mode(remote),
                             "Maintenance mode should not be available")

                logger.info('Unexpected reboot on node %s', nailgun_node.name)
                command2 = ('reboot --force >/dev/null & ')
                result = remote.execute(command2)
                assert_equal(
                    result['exit_code'], 0,
                    'Failed to execute "{0}" on remote host: {1}'.format(
                        command2, result))

            # The node does not have enough time to be reported offline
            # after 'reboot --force', so just wait for SSH to come back

            _ip = self.fuel_web.get_nailgun_node_by_name(
                nailgun_node.name)['ip']
            _wait(lambda: _tcp_ping(_ip, 22), timeout=120)

            logger.info(
                'Wait for %s node online status after unexpected '
                'reboot', nailgun_node.name)
            self.fuel_web.wait_nodes_get_online_state([nailgun_node])

            logger.info(
                'Check that %s node is not in maintenance mode after'
                ' unexpected reboot', nailgun_node.name)

            with self.fuel_web.get_ssh_for_node(nailgun_node.name) as remote:
                assert_false('True' in check_auto_mode(remote),
                             "Maintenance mode should not switched")

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up(
                [n.name for n in self.env.d_env.nodes().slaves[0:3]])

            # Wait until Cinder services are UP on a controller
            self.fuel_web.wait_cinder_is_up(
                [n.name for n in self.env.d_env.nodes().slaves[0:3]])

            _wait(lambda: self.fuel_web.run_single_ostf_test(
                cluster_id,
                test_sets=['sanity'],
                test_name=map_ostf.OSTF_TEST_MAPPING.get(
                    'Check that required services are running')),
                  timeout=1500)
            logger.debug("Required services are running")

            _wait(lambda: self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
                  timeout=1500)
            logger.debug("HA tests are pass now")

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 600 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
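Examples #5, #6, and #8 call check_available_mode(remote) and check_auto_mode(remote) and test whether the returned text contains 'True'. Those helpers are not part of the excerpts; the sketch below only illustrates the contract the callers rely on. The umm status probes are assumptions about the UMM CLI, and remote.execute() is assumed to return a dict with 'exit_code' and a 'stdout' line list, consistent with how the examples read 'exit_code'.

def _remote_flag(remote, command):
    # Run a shell command that prints 'True' or 'False' and return the output
    # as a stripped string, so callers can use the "'True' in ..." idiom.
    return ''.join(remote.execute(command)['stdout']).strip()


def check_available_mode(remote):
    # Hypothetical: is maintenance mode available on this node?
    return _remote_flag(
        remote, 'umm status >/dev/null 2>&1 && echo "True" || echo "False"')


def check_auto_mode(remote):
    # Hypothetical: is the node currently in maintenance mode?
    return _remote_flag(
        remote,
        'umm status 2>/dev/null | grep -q umm && echo "True" || echo "False"')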
Example #6
 def wait_for_provisioning(self):
     _wait(lambda: _tcp_ping(
         self.nodes().admin.get_ip_address_by_network_name(self.admin_net),
         22),
           timeout=5 * 60)

    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        for nailgun_node in self.env.d_env.nodes().slaves[0:3]:
            with self.fuel_web.get_ssh_for_node(nailgun_node.name) as remote:
                assert_true('True' in check_available_mode(remote),
                            "Maintenance mode is not available")

                logger.info('Change UMM.CONF on node %s', nailgun_node.name)
                command1 = ("echo -e 'UMM=yes\nREBOOT_COUNT=0\n"
                            "COUNTER_RESET_TIME=10' > /etc/umm.conf")

                result = remote.execute(command1)
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format(command1, result))

                result = remote.execute('umm disable')
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format('umm disable', result))

                assert_false('True' in check_available_mode(remote),
                             "Maintenance mode should not be available")

                logger.info('Unexpected reboot on node %s', nailgun_node.name)
                command2 = ('reboot --force >/dev/null & ')
                result = remote.execute(command2)
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format(command2, result))

            # The node does not have enough time to be reported offline
            # after 'reboot --force', so just wait for SSH to come back

            _ip = self.fuel_web.get_nailgun_node_by_name(
                nailgun_node.name)['ip']
            _wait(lambda: _tcp_ping(_ip, 22), timeout=120)

            logger.info('Wait for %s node online status after unexpected '
                        'reboot', nailgun_node.name)
            self.fuel_web.wait_nodes_get_online_state([nailgun_node])

            logger.info('Check that %s node is not in maintenance mode after'
                        ' unexpected reboot', nailgun_node.name)

            with self.fuel_web.get_ssh_for_node(nailgun_node.name) as remote:
                assert_false('True' in check_auto_mode(remote),
                             "Maintenance mode should not switched")

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up(
                [n.name for n in self.env.d_env.nodes().slaves[0:3]])

            # Wait until Cinder services are UP on a controller
            self.fuel_web.wait_cinder_is_up(
                [n.name for n in self.env.d_env.nodes().slaves[0:3]])

            _wait(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['sanity'],
                      test_name=map_ostf.OSTF_TEST_MAPPING.get(
                          'Check that required services are running')),
                  timeout=1500)
            logger.debug("Required services are running")

            _wait(lambda:
                  self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
                  timeout=1500)
            logger.debug("HA tests are pass now")

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 600 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
Example #8
        """Create SSH-connection to the network

        :rtype : SSHClient
        """
        return SSHClient(
            self.get_ip_address_by_network_name(network_name),
            username=login,
            password=password,
            private_keys=private_keys,
        )

    def send_keys(self, keys):
        self.driver.node_send_keys(self, keys)

    def await(self, network_name, timeout=120, by_port=22):
        _wait(lambda: _tcp_ping(self.get_ip_address_by_network_name(network_name), by_port), timeout=timeout)

    def define(self):
        self.driver.node_define(self)
        self.save()

    def start(self):
        self.create(verbose=False)

    def create(self, verbose=False):
        if verbose or not self.driver.node_active(self):
            self.driver.node_create(self)

    def destroy(self, verbose=False):
        if verbose or self.driver.node_active(self):
            self.driver.node_destroy(self)

    def manual_cic_maintenance_mode(self):
        """Check manual maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Switch to maintenance mode
            3. Wait until the controller reboots
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id,
            ['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true('True' in check_available_mode(remote),
                            "Maintenance mode is not available")

                logger.info('Maintenance mode for node %s', devops_node.name)
                result = remote.execute('umm on')
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format('umm on', result))
            logger.info('Wait for %s node offline status after switching '
                        'maintenance mode', devops_node.name)
            try:
                wait(
                    lambda: not
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'], timeout=60 * 10)
            except TimeoutError:
                assert_false(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'],
                    'Node {0} has not become offline after '
                    'switching maintenance mode'.format(devops_node.name))

            logger.info('Check that %s node is in maintenance mode after '
                        'switching', devops_node.name)

            _ip = self.fuel_web.get_nailgun_node_by_name(
                devops_node.name)['ip']
            _wait(lambda: _tcp_ping(_ip, 22), timeout=60 * 10)
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true('True' in check_auto_mode(remote),
                            "Maintenance mode is not switch")

                result = remote.execute('umm off')
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format('umm off', result))

            logger.info('Wait for %s node online status', devops_node.name)
            try:
                wait(
                    lambda:
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'], timeout=60 * 10)
            except TimeoutError:
                assert_true(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'],
                    'Node {0} has not become online after '
                    'exiting maintenance mode'.format(devops_node.name))

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up(
                [n.name for n in d_ctrls])

            # Wait until Cinder services are UP on a controller
            self.fuel_web.wait_cinder_is_up(
                [n.name for n in d_ctrls])

            _wait(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['sanity'],
                      test_name=map_ostf.OSTF_TEST_MAPPING.get(
                          'Check that required services are running')),
                  timeout=1500)
            logger.debug("Required services are running")

            _wait(lambda:
                  self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
                  timeout=1500)
            logger.debug("HA tests are pass now")

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 600 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])

    def auto_cic_maintenance_mode(self):
        """Check auto maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Unexpected reboot
            3. Wait until the controller switches to maintenance mode
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id,
            ['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true('True' in check_available_mode(remote),
                            "Maintenance mode is not available")

                logger.info('Change UMM.CONF on node %s', devops_node.name)
                command1 = ("echo -e 'UMM=yes\nREBOOT_COUNT=0\n"
                            "COUNTER_RESET_TIME=10' > /etc/umm.conf")

                result = remote.execute(command1)
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format(command1, result))

                logger.info('Unexpected reboot on node %s', devops_node.name)
                command2 = 'reboot --force >/dev/null & '
                result = remote.execute(command2)
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format(command2, result))

            logger.info('Wait for %s node offline status after unexpected '
                        'reboot', devops_node.name)
            try:
                wait(
                    lambda: not
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'], timeout=60 * 10)
            except TimeoutError:
                assert_false(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'],
                    'Node {0} has not become offline after unexpected '
                    'reboot'.format(devops_node.name))

            logger.info('Check that %s node is in maintenance mode after'
                        ' unexpected reboot', devops_node.name)

            _ip = self.fuel_web.get_nailgun_node_by_name(
                devops_node.name)['ip']
            _wait(lambda: _tcp_ping(_ip, 22), timeout=60 * 10)
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true('True' in check_auto_mode(remote),
                            "Maintenance mode is not switch")

                result = remote.execute('umm off')
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format('umm off', result))
                # Wait for umm to stop
                time.sleep(30)
                command3 = ("echo -e 'UMM=yes\nREBOOT_COUNT=2\n"
                            "COUNTER_RESET_TIME=10' > /etc/umm.conf")
                result = remote.execute(command3)
                assert_equal(result['exit_code'], 0,
                             'Failed to execute "{0}" on remote host: {1}'.
                             format(command3, result))

            logger.info('Wait for %s node online status', devops_node.name)
            try:
                wait(
                    lambda:
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'], timeout=90 * 10)
            except TimeoutError:
                assert_true(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)
                    ['online'],
                    'Node {0} has not become online after umm off'.format(
                        devops_node.name))

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up(
                [n.name for n in d_ctrls])

            # Wait until Cinder services are UP on a controller
            self.fuel_web.wait_cinder_is_up(
                [n.name for n in d_ctrls])

            _wait(lambda:
                  self.fuel_web.run_single_ostf_test(
                      cluster_id, test_sets=['sanity'],
                      test_name=map_ostf.OSTF_TEST_MAPPING.get(
                          'Check that required services are running')),
                  timeout=1500)
            logger.debug("Required services are running")

            _wait(lambda:
                  self.fuel_web.run_ostf(cluster_id, test_sets=['ha']),
                  timeout=1500)
            logger.debug("HA tests are pass now")

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 600 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['smoke', 'sanity'])
Example #11
 def wait_for_provisioning(self,
                           timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
     _wait(lambda: _tcp_ping(
         self.d_env.nodes().admin.get_ip_address_by_network_name(
             self.d_env.admin_net), 22),
           timeout=timeout)
Example #12
    def remote(self, network_name, login, password=None, private_keys=None):
        """
        :rtype : SSHClient
        """
        return SSHClient(
            self.get_ip_address_by_network_name(network_name),
            username=login,
            password=password, private_keys=private_keys)

    def send_keys(self, keys):
        self.driver.node_send_keys(self, keys)

    def await(self, network_name, timeout=120):
        _wait(
            lambda: _tcp_ping(
                self.get_ip_address_by_network_name(network_name), 22),
            timeout=timeout)

    def define(self):
        self.driver.node_define(self)
        self.save()

    def start(self):
        self.create(verbose=False)

    def create(self, verbose=False):
        if verbose or not self.driver.node_active(self):
            self.driver.node_create(self)

    def destroy(self, verbose=False):
        if verbose or self.driver.node_active(self):
            self.driver.node_destroy(self)
Example #13
 def await(self, network_name, timeout=120):
     _wait(lambda: _tcp_ping(
         self.get_ip_address_by_network_name(network_name), 22),
           timeout=timeout)
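One caveat about the await helper in Examples #8, #12, and #13: await became a reserved keyword in Python 3.7, so that method name only parses on the Python 2 era interpreters this code was written for. A hypothetical, renamed equivalent for modern Python with the same behaviour as Example #13:

def wait_ssh(self, network_name, timeout=120):
    # Same behaviour as the await helper above (a method of the same Node
    # class), under a name that is still a valid identifier on Python 3.7+.
    _wait(
        lambda: _tcp_ping(
            self.get_ip_address_by_network_name(network_name), 22),
        timeout=timeout)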