Example no. 1
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[
                    self.primary_controller_fqdn]['root_free']

            logger.info("Free space in root on primary controller - {}".format(
                controller_space_on_root))

            controller_space_to_filled = str(
                int(controller_space_on_root) - self.rabbit_disk_free_limit -
                1)

            logger.info("Need to fill space on root - {}".format(
                controller_space_to_filled))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
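
Every variant of this method relies on get_pacemaker_nodes_attributes() to turn the output of 'cibadmin --query --scope status' into a per-node attribute dict. That helper is not included in these examples; the sketch below shows one way such a parser could work, assuming the standard pacemaker CIB layout where each <node_state> element carries its transient attributes as <nvpair> entries. It is illustrative only, not the actual fuel-qa implementation.

from xml.etree import ElementTree


def get_pacemaker_nodes_attributes(cibadmin_status_xml):
    """Map each pacemaker node to its status attributes, e.g.
    {'node-1.test.domain.local': {'root_free': '2048', ...}} (sketch)."""
    nodes = {}
    root = ElementTree.fromstring(cibadmin_status_xml)
    for node_state in root.iter('node_state'):
        uname = node_state.get('uname')
        nodes[uname] = {}
        # transient attributes such as 'root_free' or '#health_disk'
        # are stored as <nvpair name="..." value="..."/> elements
        for nvpair in node_state.iter('nvpair'):
            nodes[uname][nvpair.get('name')] = nvpair.get('value')
    return nodes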
Example no. 2
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[self.primary_controller_fqdn][
                'root_free']

            logger.info(
                "Free space in root on primary controller - {}".format(
                    controller_space_on_root
                ))

            controller_space_to_filled = str(
                int(
                    controller_space_on_root
                ) - self.rabbit_disk_free_limit - 1)

            logger.info(
                "Need to fill space on root - {}".format(
                    controller_space_to_filled
                ))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
Example no. 3
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        pacemaker_attributes = self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='cibadmin --query --scope status'
        )['stdout_str']
        controller_space_on_root = get_pacemaker_nodes_attributes(
            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']

        logger.info("Free space in root on primary controller - {}".format(
                    controller_space_on_root))

        controller_space_to_filled = str(
            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1
        )

        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))

        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile2 && sync'.format(
                controller_space_to_filled)
        )
        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='ls /root/bigfile2',
            assert_ec_equal=[0])
Example no. 4
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        pacemaker_attributes = self.ssh_manager.execute_on_remote(
            ip=node['ip'], cmd='cibadmin --query --scope status')['stdout_str']
        controller_space_on_root = get_pacemaker_nodes_attributes(
            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']

        logger.info("Free space in root on primary controller - {}".format(
            controller_space_on_root))

        controller_space_to_filled = str(
            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1)

        if int(controller_space_to_filled) < 1:
            logger.info(
                "Nothing to do."
                " Free space in root partition already less than {}.".format(
                    self.rabbit_disk_free_limit))
            return

        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))

        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile2 && sync'.format(
                controller_space_to_filled))
        self.ssh_manager.execute_on_remote(ip=node['ip'],
                                           cmd='ls /root/bigfile2',
                                           assert_ec_equal=[0])
Example no. 5
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = run_on_remote_get_results(
         remote, 'cibadmin --query --scope status')['stdout_str']
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example no. 6
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example no. 7
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example no. 8
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = run_on_remote_get_results(
         remote, 'cibadmin --query --scope status')[
         'stdout_str']
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example no. 9
 def checking_health_disk_attribute_is_not_present():
     logger.info("Checking for '#health_disk' attribute "
                 "is not present on node {}".format(
                     self.primary_controller_fqdn))
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' not in pcs_attribs[
         self.primary_controller_fqdn]
Example no. 10
 def checking_health_disk_attribute_is_not_present():
     logger.info(
         "Checking for '#health_disk' attribute "
         "is not present on node {}".format(
             self.primary_controller_fqdn))
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' not in pcs_attribs[
         self.primary_controller_fqdn]
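
The checking_health_disk_attribute* closures in examples no. 5-10 are boolean predicates; the surrounding tests poll them until pacemaker sets (or clears) the '#health_disk' attribute. The waiting helper itself is not part of these snippets, so the generic polling loop below is only a sketch of how such a predicate might be driven; the timeout value mirrors self.pcs_check_timeout from examples no. 11 and 12.

import time


def wait_until(predicate, timeout=300, interval=10):
    """Poll `predicate` until it returns True or `timeout` seconds pass
    (generic stand-in for the project's own waiting helper)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise AssertionError(
        "Predicate {!r} was not satisfied within {} seconds".format(
            getattr(predicate, '__name__', predicate), timeout))

# usage sketch: block until '#health_disk' appears on the primary controller
# wait_until(checking_health_disk_attribute, timeout=self.pcs_check_timeout)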
Example no. 11
    def get_pcs_initial_state(self):
        """Get controllers initial status in pacemaker"""
        self.primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])

        self.primary_controller_fqdn = str(
            self.fuel_web.fqdn(self.primary_controller))

        primary_ctrl = \
            self.primary_controller.get_ip_address_by_network_name('admin')
        pcs_status = parse_pcs_status_xml(primary_ctrl)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            root_free = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

        self.primary_controller_space_on_root = get_pacemaker_nodes_attributes(
            root_free)[self.primary_controller_fqdn]['root_free']

        self.disk_monitor_limit = 512

        self.rabbit_disk_free_limit = 5

        self.pacemaker_restart_timeout = 600

        self.pcs_check_timeout = 300

        self.primary_controller_space_to_filled = str(
            int(
                self.primary_controller_space_on_root
            ) - self.disk_monitor_limit - 1)

        self.pcs_status = get_pcs_nodes(pcs_status)

        self.slave_nodes_fqdn = list(
            set(self.pcs_status.keys()).difference(
                set(self.primary_controller_fqdn.split())))
        running_resources_slave_1 = int(
            self.pcs_status[self.slave_nodes_fqdn[0]]['resources_running'])

        running_resources_slave_2 = int(
            self.pcs_status[self.slave_nodes_fqdn[1]]['resources_running'])

        self.slave_node_running_resources = str(min(running_resources_slave_1,
                                                    running_resources_slave_2
                                                    )
                                                )
Example no. 12
    def get_pcs_initial_state(self):
        """Get controllers initial status in pacemaker"""
        self.primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])

        self.primary_controller_fqdn = str(
            self.fuel_web.fqdn(self.primary_controller))

        primary_ctrl = \
            self.primary_controller.get_ip_address_by_network_name('admin')
        pcs_status = parse_pcs_status_xml(primary_ctrl)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            root_free = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

        self.primary_controller_space_on_root = get_pacemaker_nodes_attributes(
            root_free)[self.primary_controller_fqdn]['root_free']

        self.disk_monitor_limit = 512

        self.rabbit_disk_free_limit = 5

        self.pacemaker_restart_timeout = 600

        self.pcs_check_timeout = 300

        self.primary_controller_space_to_filled = str(
            int(self.primary_controller_space_on_root) -
            self.disk_monitor_limit - 1)

        self.pcs_status = get_pcs_nodes(pcs_status)

        self.slave_nodes_fqdn = list(
            set(self.pcs_status.keys()).difference(
                set(self.primary_controller_fqdn.split())))
        running_resources_slave_1 = int(
            self.pcs_status[self.slave_nodes_fqdn[0]]['resources_running'])

        running_resources_slave_2 = int(
            self.pcs_status[self.slave_nodes_fqdn[1]]['resources_running'])

        self.slave_node_running_resources = str(
            min(running_resources_slave_1, running_resources_slave_2))
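
get_pcs_initial_state() also depends on parse_pcs_status_xml() and get_pcs_nodes(), neither of which is shown here. Assuming parse_pcs_status_xml() returns crm_mon-style XML (as produced by 'pcs status xml'), a minimal get_pcs_nodes() could look like the sketch below; attribute values stay strings, which matches the int(...) conversions used above.

from xml.etree import ElementTree


def get_pcs_nodes(pcs_status_xml):
    """Map each cluster node to its crm_mon attributes, e.g.
    {'node-2.test.domain.local': {'online': 'true',
                                  'resources_running': '43', ...}} (sketch)."""
    root = ElementTree.fromstring(pcs_status_xml)
    # assumes the root element is a full crm_mon document whose <nodes>
    # section lists one <node .../> entry per cluster member
    return {node.get('name'): dict(node.attrib)
            for node in root.findall('./nodes/node')}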
Example no. 13
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        pacemaker_attributes = self.ssh_manager.execute_on_remote(
            ip=node['ip'], cmd='cibadmin --query --scope status')['stdout_str']
        controller_space_on_root = get_pacemaker_nodes_attributes(
            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']

        logger.info("Free space in root on primary controller - {}".format(
            controller_space_on_root))

        controller_space_to_filled = str(
            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1)

        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))

        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile2'.format(
                controller_space_to_filled))
        check_file_exists(node['ip'], '/root/bigfile2')
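
Example no. 13 passes an IP address to check_file_exists() instead of an SSH connection object (compare example no. 1), but the helper itself is not reproduced here. A hypothetical equivalent built on the ssh_manager pattern from example no. 4 could look like this; the explicit ssh_manager parameter is an assumption made only to keep the sketch self-contained.

def check_file_exists(ssh_manager, ip, path):
    """Fail if `path` is missing on the node at `ip` (illustrative sketch,
    not the actual fuel-qa helper)."""
    ssh_manager.execute_on_remote(
        ip=ip,
        cmd='test -f {0}'.format(path),
        assert_ec_equal=[0])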