Example #1
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[self.primary_controller_fqdn][
                'root_free']

            logger.info(
                "Free space in root on primary controller - {}".format(
                    controller_space_on_root
                ))

            controller_space_to_filled = str(
                int(
                    controller_space_on_root
                ) - self.rabbit_disk_free_limit - 1)

            logger.info(
                "Need to fill space on root - {}".format(
                    controller_space_to_filled
                ))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
Example #2
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[
                    self.primary_controller_fqdn]['root_free']

            logger.info("Free space in root on primary controller - {}".format(
                controller_space_on_root))

            controller_space_to_filled = str(
                int(controller_space_on_root) - self.rabbit_disk_free_limit -
                1)

            logger.info("Need to fill space on root - {}".format(
                controller_space_to_filled))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
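Both variants above rely on get_pacemaker_nodes_attributes() to turn the raw 'cibadmin --query --scope status' output into per-node attributes. A minimal sketch of such a helper, assuming the usual Pacemaker CIB layout in which every <node_state> element carries <nvpair> entries for its transient attributes (e.g. 'root_free', '#health_disk'):

from xml.etree import ElementTree


def get_pacemaker_nodes_attributes(cibadmin_status_xml):
    # Hypothetical sketch: build {node_fqdn: {attribute_name: value}} from
    # the <status> XML returned by 'cibadmin --query --scope status'.
    nodes = {}
    for node_state in ElementTree.fromstring(cibadmin_status_xml).iter(
            'node_state'):
        attributes = {}
        for nvpair in node_state.iter('nvpair'):
            attributes[nvpair.get('name')] = nvpair.get('value')
        nodes[node_state.get('uname')] = attributes
    return nodes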
Example #3
def verify_bootstrap_on_node(remote, os_type, uuid=None):
    os_type = os_type.lower()
    if os_type not in ['ubuntu', 'centos']:
        raise Exception("Only Ubuntu and CentOS are supported, "
                        "you have chosen {0}".format(os_type))

    logger.info("Verify bootstrap on slave {0}".format(remote.host))

    cmd = 'cat /etc/*release'
    output = run_on_remote_get_results(remote, cmd)['stdout_str'].lower()
    assert_true(
        os_type in output, "Slave {0} doesn't use {1} image for bootstrap "
        "after {1} images were enabled, /etc/release "
        "content: {2}".format(remote.host, os_type, output))

    if os_type == 'centos' or uuid is None:
        return

    cmd = "cat /etc/nailgun-agent/config.yaml"
    output = yaml.load(run_on_remote_get_results(remote, cmd)['stdout_str'])
    actual_uuid = output.get("runtime_uuid")
    assert_equal(
        actual_uuid, uuid,
        "Actual uuid {0} is not the same as expected {1}".format(
            actual_uuid, uuid))
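A hedged usage sketch for verify_bootstrap_on_node(), written as a helper method on a test class that exposes the same self.env / self.fuel_web objects used in the other examples; the method name and the expected uuid are caller-supplied placeholders:

    def check_ubuntu_bootstrap_on_slaves(self, expected_uuid):
        """Hypothetical helper: verify every bootstrapped slave node."""
        for devops_node in self.env.d_env.nodes().slaves[:2]:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                verify_bootstrap_on_node(remote, os_type='ubuntu',
                                         uuid=expected_uuid)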
Example #4
    def clean_up_space_on_root(self):
        """Clean up space on root filesystem on primary controller"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:
            run_on_remote_get_results(remote,
                                      'rm /root/bigfile /root/bigfile2')

            run_on_remote_get_results(
                remote, 'crm node status-attr {} delete "#health_disk"'.format(
                    self.primary_controller_fqdn))
Example #5
    def clean_up_space_on_root(self):
        """Clean up space on root filesystem on primary controller"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:
            run_on_remote_get_results(
                remote, 'rm /root/bigfile /root/bigfile2')

            run_on_remote_get_results(
                remote,
                'crm node status-attr {} delete "#health_disk"'.format(
                    self.primary_controller_fqdn))
Example #6
    def reconfigure_keystone_to_use_ldap(self):
        """Reconfigure keystone to use LDAP

        Scenario:
            1. Revert snapshot "deploy_neutron_vlan_ha"
            2. Upload a new openstack configuration
            3. Try to apply a new keystone configuration
            4. Wait for the deployment task to fail
            5. Check that the failure is caused by the inability
               to connect to the LDAP server

        Snapshot: reconfigure_keystone_to_use_ldap

        """
        self.show_step(1)
        self.env.revert_snapshot("deploy_neutron_vlan_ha")

        cluster_id = self.fuel_web.get_last_created_cluster()
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        ldap_cntrllr = controllers[0]

        self.show_step(2)
        config = utils.get_config_template('keystone_ldap')
        self.fuel_web.client.upload_configuration(
            config,
            cluster_id,
            node_id=ldap_cntrllr['id'])

        self.show_step(3)
        task = self.fuel_web.client.apply_configuration(
            cluster_id,
            node_id=ldap_cntrllr['id'])

        self.show_step(4)
        try:
            self.fuel_web.assert_task_success(task, timeout=1800, interval=30)
        except AssertionError:
            pass
        else:
            raise Exception("New configuration was not applied")

        self.show_step(5)
        with self.env.d_env.get_ssh_to_remote(ldap_cntrllr['ip']) as remote:
            log_path = '/var/log/puppet.log'
            cmd = "grep \"Can't contact LDAP server\" {0}".format(log_path)
            utils.run_on_remote_get_results(remote, cmd)

        self.env.make_snapshot("reconfigure_keystone_to_use_ldap",
                               is_make=True)
Example #7
    def reconfigure_keystone_to_use_ldap(self):
        """Reconfigure keystone to use LDAP

        Scenario:
            1. Revert snapshot "basic_env_for_reconfiguration"
            2. Upload a new openstack configuration
            3. Try to apply a new keystone configuration
            4. Wait for the deployment task to fail
            5. Check that the failure is caused by the inability
               to connect to the LDAP server

        Snapshot: reconfigure_keystone_to_use_ldap

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("basic_env_for_reconfiguration")

        cluster_id = self.fuel_web.get_last_created_cluster()
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        ldap_cntrllr = controllers[0]

        self.show_step(2)
        config = utils.get_config_template('keystone_ldap')
        self.fuel_web.client.upload_configuration(
            config,
            cluster_id,
            node_id=ldap_cntrllr['id'])

        self.show_step(3)
        task = self.fuel_web.client.apply_configuration(
            cluster_id,
            node_id=ldap_cntrllr['id'])

        self.show_step(4)
        try:
            self.fuel_web.assert_task_success(task, timeout=1800, interval=30)
        except AssertionError:
            pass
        else:
            raise Exception("New configuration was not applied")

        self.show_step(5)
        with self.env.d_env.get_ssh_to_remote(ldap_cntrllr['ip']) as remote:
            log_path = '/var/log/puppet.log'
            cmd = "grep \"Can't contact LDAP server\" {0}".format(log_path)
            utils.run_on_remote_get_results(remote, cmd)

        self.env.make_snapshot("reconfigure_keystone_to_use_ldap")
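Both LDAP reconfiguration variants load their configuration through utils.get_config_template('keystone_ldap'). A minimal sketch of what such a loader could look like, assuming templates are plain YAML files looked up by name in a templates directory (the directory path, environment variable, and file extension are assumptions):

import os

import yaml


def get_config_template(template_name):
    # Hypothetical sketch: resolve '<templates_dir>/<name>.yaml' and load it.
    templates_dir = os.environ.get('CONFIG_TEMPLATES_DIR', 'config_templates')
    path = os.path.join(templates_dir, '{0}.yaml'.format(template_name))
    with open(path) as template_file:
        return yaml.safe_load(template_file)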
Example #8
    def fill_root_above_rabbit_disk_free_limit(self):
        """Filling root filesystem on primary controller"""

        logger.info("Free space in root on primary controller - {}".format(
            self.primary_controller_space_on_root))

        logger.info("Need to fill space on root - {}".format(
            self.primary_controller_space_to_filled))

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:
            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile'.format(
                    self.primary_controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile')
Example #9
def parse_pcs_status_xml(remote):
    """Parse 'pcs status xml'. <Nodes> section
    :param remote: SSHClient instance
    :return: nested dictionary with node-fqdn and attribute name as keys
    """
    pcs_status_dict = run_on_remote_get_results(
        remote, 'pcs status xml')['stdout_str']
    return pcs_status_dict
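parse_pcs_status_xml() only fetches the XML; a separate helper, get_pcs_nodes(), is used in get_pcs_initial_state() below to turn it into a dictionary. A minimal sketch, assuming the crm_mon/pcs XML layout in which the <nodes> section contains <node> elements whose attributes include values such as 'resources_running':

from xml.etree import ElementTree


def get_pcs_nodes(pcs_status_xml):
    # Hypothetical sketch: {node_name: {xml attribute: value}} built from
    # the <nodes> section of 'pcs status xml' output.
    nodes = {}
    for node in ElementTree.fromstring(pcs_status_xml).find('nodes'):
        nodes[node.get('name')] = dict(node.attrib)
    return nodes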
Example #10
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = run_on_remote_get_results(
         remote, 'cibadmin --query --scope status')['stdout_str']
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example #11
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = run_on_remote_get_results(
         remote, 'cibadmin --query --scope status')[
         'stdout_str']
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
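checking_health_disk_attribute() is a predicate meant to be polled until Pacemaker sets the '#health_disk' attribute on the node. A minimal polling sketch using only the standard library (the helper name is hypothetical; the timeout mirrors self.pcs_check_timeout from the examples below):

import time


def wait_for(predicate, timeout, interval=30):
    # Hypothetical sketch: call the predicate every `interval` seconds and
    # fail if it never returns True within `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise AssertionError(
        "Predicate {0!r} was not satisfied in {1} seconds".format(
            predicate, timeout))

# Usage: wait_for(checking_health_disk_attribute, timeout=300)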
Example #12
    def fill_root_above_rabbit_disk_free_limit(self):
        """Filling root filesystem on primary controller"""

        logger.info(
            "Free space in root on primary controller - {}".format(
                self.primary_controller_space_on_root
            ))

        logger.info(
            "Need to fill space on root - {}".format(
                self.primary_controller_space_to_filled
            ))

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:
            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile'.format(
                    self.primary_controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile')
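Several examples call check_file_exists() right after fallocate. A minimal sketch of such a check, assuming the SSH client exposes an execute() method returning a dict with an 'exit_code' key (as in the run_upgrade_script examples below):

def check_file_exists(remote, path):
    # 'test -f' exits non-zero when the file is absent.
    result = remote.execute('test -f {0}'.format(path))
    assert result['exit_code'] == 0, (
        "File {0} was not found on the remote node".format(path))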
Example #13
    def get_pcs_initial_state(self):
        """Get controllers initial status in pacemaker"""
        self.primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])

        self.primary_controller_fqdn = str(
            self.fuel_web.fqdn(self.primary_controller))

        primary_ctrl = \
            self.primary_controller.get_ip_address_by_network_name('admin')
        pcs_status = parse_pcs_status_xml(primary_ctrl)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            root_free = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

        self.primary_controller_space_on_root = get_pacemaker_nodes_attributes(
            root_free)[self.primary_controller_fqdn]['root_free']

        self.disk_monitor_limit = 512

        self.rabbit_disk_free_limit = 5

        self.pacemaker_restart_timeout = 600

        self.pcs_check_timeout = 300

        self.primary_controller_space_to_filled = str(
            int(
                self.primary_controller_space_on_root
            ) - self.disk_monitor_limit - 1)

        self.pcs_status = get_pcs_nodes(pcs_status)

        self.slave_nodes_fqdn = list(
            set(self.pcs_status.keys()).difference(
                set(self.primary_controller_fqdn.split())))
        running_resources_slave_1 = int(
            self.pcs_status[self.slave_nodes_fqdn[0]]['resources_running'])

        running_resources_slave_2 = int(
            self.pcs_status[self.slave_nodes_fqdn[1]]['resources_running'])

        self.slave_node_running_resources = str(min(running_resources_slave_1,
                                                    running_resources_slave_2
                                                    )
                                                )
Example #14
def verify_bootstrap_on_node(remote, os_type, uuid=None):
    os_type = os_type.lower()
    if os_type not in ['ubuntu', 'centos']:
        raise Exception("Only Ubuntu and CentOS are supported, "
                        "you have chosen {0}".format(os_type))

    logger.info("Verify bootstrap on slave {0}".format(remote.host))

    cmd = 'cat /etc/*release'
    output = run_on_remote_get_results(remote, cmd)['stdout_str'].lower()
    assert_true(os_type in output,
                "Slave {0} doesn't use {1} image for bootstrap "
                "after {1} images were enabled, /etc/release "
                "content: {2}".format(remote.host, os_type, output))

    if os_type == 'centos' or uuid is None:
        return

    cmd = "cat /etc/nailgun-agent/config.yaml"
    output = yaml.load(run_on_remote_get_results(remote, cmd)['stdout_str'])
    actual_uuid = output.get("runtime_uuid")
    assert_equal(actual_uuid, uuid,
                 "Actual uuid {0} is not the same as expected {1}"
                 .format(actual_uuid, uuid))
Example #15
    def get_pcs_initial_state(self):
        """Get controllers initial status in pacemaker"""
        self.primary_controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])

        self.primary_controller_fqdn = str(
            self.fuel_web.fqdn(self.primary_controller))

        primary_ctrl = \
            self.primary_controller.get_ip_address_by_network_name('admin')
        pcs_status = parse_pcs_status_xml(primary_ctrl)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            root_free = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

        self.primary_controller_space_on_root = get_pacemaker_nodes_attributes(
            root_free)[self.primary_controller_fqdn]['root_free']

        self.disk_monitor_limit = 512

        self.rabbit_disk_free_limit = 5

        self.pacemaker_restart_timeout = 600

        self.pcs_check_timeout = 300

        self.primary_controller_space_to_filled = str(
            int(self.primary_controller_space_on_root) -
            self.disk_monitor_limit - 1)

        self.pcs_status = get_pcs_nodes(pcs_status)

        self.slave_nodes_fqdn = list(
            set(self.pcs_status.keys()).difference(
                set(self.primary_controller_fqdn.split())))
        running_resources_slave_1 = int(
            self.pcs_status[self.slave_nodes_fqdn[0]]['resources_running'])

        running_resources_slave_2 = int(
            self.pcs_status[self.slave_nodes_fqdn[1]]['resources_running'])

        self.slave_node_running_resources = str(
            min(running_resources_slave_1, running_resources_slave_2))
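The space-to-fill arithmetic above is simply the reported free space minus the monitor limit minus 1 MB; a worked example with illustrative numbers:

primary_controller_space_on_root = '12000'   # MB, as reported by pacemaker
disk_monitor_limit = 512                     # MB
space_to_fill = str(
    int(primary_controller_space_on_root) - disk_monitor_limit - 1)
assert space_to_fill == '11487'              # MB passed to fallocate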
Example #16
def run_upgrade_script(node_ssh,
                       script_path,
                       script_name,
                       password='******',
                       rollback=False,
                       exit_code=0):
    path = os.path.join(script_path, script_name)
    check_file_exists(node_ssh, path)
    c_res = node_ssh.execute('chmod 755 {0}'.format(path))
    logger.debug("Result of chmod is {0}".format(c_res))
    if rollback:
        path = "UPGRADERS='host-system docker openstack" \
               " raise-error' {0}/{1}" \
               " --password {2}".format(script_path, script_name, password)
    else:
        path = "{0}/{1} --no-rollback --password {2}".format(
            script_path, script_name, password)

    result = run_on_remote_get_results(node_ssh,
                                       path,
                                       assert_ec_equal=[exit_code],
                                       raise_on_assert=False)

    # TODO: check that we really need this log from fuel_upgrade.log
    if result['exit_code'] != exit_code:
        log = "".join(
            run_on_remote(node_ssh,
                          "awk -v p=\"UPGRADE FAILED\" 'BEGIN{m=\"\"}"
                          " {if ($0 ~ p) {m=$0} else m=m\"\\n\"$0}"
                          " END{if (m ~ p) print m}'"
                          " /var/log/fuel_upgrade.log",
                          raise_on_assert=False))

        logger.error("Message from /var/log/fuel_upgrade.log:\n"
                     "{log}".format(log=log))

    assert_equal(
        result['exit_code'], exit_code,
        "Upgrade script failed with exit code {exit_code}, "
        "please inspect logs for details.\n"
        "last output: \"{output}\""
        "".format(exit_code=result['exit_code'],
                  output=''.join(result['stdout'][-5:]) +
                  result['stderr_str']))
Example #17
def run_upgrade_script(node_ssh, script_path, script_name, password='******',
                       rollback=False, exit_code=0):
    path = os.path.join(script_path, script_name)
    check_file_exists(node_ssh, path)
    c_res = node_ssh.execute('chmod 755 {0}'.format(path))
    logger.debug("Result of chmod is {0}".format(c_res))
    if rollback:
        path = "UPGRADERS='host-system docker openstack" \
               " raise-error' {0}/{1}" \
               " --password {2}".format(script_path, script_name, password)
    else:
        path = "{0}/{1} --no-rollback --password {2}".format(script_path,
                                                             script_name,
                                                             password)

    result = run_on_remote_get_results(node_ssh, path,
                                       assert_ec_equal=[exit_code],
                                       raise_on_assert=False)

    # TODO: check that we really need this log from fuel_upgrade.log
    if result['exit_code'] != exit_code:
        log = "".join(
            run_on_remote(node_ssh,
                          "awk -v p=\"UPGRADE FAILED\" 'BEGIN{m=\"\"}"
                          " {if ($0 ~ p) {m=$0} else m=m\"\\n\"$0}"
                          " END{if (m ~ p) print m}'"
                          " /var/log/fuel_upgrade.log",
                          raise_on_assert=False)
        )

        logger.error("Message from /var/log/fuel_upgrade.log:\n"
                     "{log}".format(log=log))

    assert_equal(
        result['exit_code'],
        exit_code,
        "Upgrade script failed with exit code {exit_code}, "
        "please inspect logs for details.\n"
        "last output: \"{output}\""
        "".format(exit_code=result['exit_code'],
                  output=''.join(result['stdout'][-5:]) + result['stderr_str'])
    )
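A hedged usage sketch for run_upgrade_script(), written as a hypothetical test step on a class that exposes self.env as in the other examples; the script location, password, and rollback exit code are placeholders:

    def upgrade_and_rollback(self):
        """Hypothetical test step; paths and exit code are placeholders."""
        with self.env.d_env.get_admin_remote() as admin_remote:
            run_upgrade_script(admin_remote, '/var', 'upgrade.sh',
                               password='admin')
            # The rollback branch prepends the raise-error upgrader, so a
            # non-zero exit code is expected (255 is illustrative).
            run_upgrade_script(admin_remote, '/var', 'upgrade.sh',
                               password='admin', rollback=True,
                               exit_code=255)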
Example #18
    def deploy_multiple_services_local_mirror(self):
        """Deploy cluster with multiple services using local mirror

        Scenario:
            1. Revert snapshot 'empty' with default set of repositories
            2. Bootstrap 5 slave nodes
            3. Run 'fuel-createmirror' to replace default repositories
               with local mirrors
            4. Create cluster with many components to check as many
               packages in local mirrors have correct dependencies
            5. Deploy cluster

        Duration 50m
        """
        self.env.revert_snapshot("empty")
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:5])

        logger.info("Executing 'fuel-createmirror' on Fuel admin node")
        with self.env.d_env.get_admin_remote() as remote:
            # TODO(ddmitriev): Enable debug via an argument for
            # 'fuel-createmirror' when bug#1458469 is fixed.
            if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
                cmd = ("sed -i 's/DEBUG=\"no\"/DEBUG=\"yes\"/' {}"
                       .format('/etc/fuel-createmirror/ubuntu.cfg'))
                remote.execute(cmd)
            else:
                # CentOS is not supported yet, see bug#1467403
                pass

            run_on_remote_get_results(remote, 'fuel-createmirror')

        # Check that all repos were replaced with local mirrors
        ubuntu_id = self.fuel_web.client.get_release_id(
            release_name=OPENSTACK_RELEASE_UBUNTU)
        ubuntu_release = self.fuel_web.client.get_release(ubuntu_id)
        ubuntu_meta = ubuntu_release["attributes_metadata"]
        repos_ubuntu = ubuntu_meta["editable"]["repo_setup"]["repos"]['value']
        remote_repos = []
        for repo_value in repos_ubuntu:
            if (self.fuel_web.admin_node_ip not in repo_value['uri'] and
                    '{settings.MASTER_IP}' not in repo_value['uri']):
                remote_repos.append({repo_value['name']: repo_value['uri']})
        assert_true(not remote_repos,
                    "Some repositories weren't replaced with local mirrors: "
                    "{0}".format(remote_repos))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                'sahara': True,
                'murano': True,
                'ceilometer': True,
                'volumes_lvm': True,
                'volumes_ceph': False,
                'images_ceph': True,
                'osd_pool_size': "3"
            }
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['cinder', 'ceph-osd'],
                'slave-04': ['mongo'],
                'slave-05': ['mongo']
            }
        )

        repos_attr = self.get_cluster_repos(cluster_id)
        self.fuel_web.report_repos(repos_attr)

        # (ddmitriev): No additional checks are required after deploy,
        # just make sure that all components are installed from the
        # local mirrors without any dependency errors or missing packages.
        self.fuel_web.deploy_cluster_wait(cluster_id)
Example #19
    def deploy_multiple_services_local_mirror(self):
        """Deploy cluster with multiple services using local mirror

        Scenario:
            1. Revert snapshot 'empty' with default set of repositories
            2. Bootstrap 5 slave nodes
            3. Run 'fuel-createmirror' to replace default repositories
               with local mirrors
            4. Create cluster with many components to check as many
               packages in local mirrors have correct dependencies
            5. Deploy cluster

        Duration 50m
        """
        self.env.revert_snapshot("empty")
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:5])

        logger.info("Executing 'fuel-createmirror' on Fuel admin node")
        with self.env.d_env.get_admin_remote() as remote:
            # TODO(ddmitriev): Enable debug via an argument for
            # 'fuel-createmirror' when bug#1458469 is fixed.
            if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
                cmd = ("sed -i 's/DEBUG=\"no\"/DEBUG=\"yes\"/' {}".format(
                    '/etc/fuel-createmirror/ubuntu.cfg'))
                remote.execute(cmd)
            else:
                # CentOS is not supported yet, see bug#1467403
                pass

            run_on_remote_get_results(remote, 'fuel-createmirror')

        # Check that all repos were replaced with local mirrors
        ubuntu_id = self.fuel_web.client.get_release_id(
            release_name=OPENSTACK_RELEASE_UBUNTU)
        ubuntu_release = self.fuel_web.client.get_release(ubuntu_id)
        ubuntu_meta = ubuntu_release["attributes_metadata"]
        repos_ubuntu = ubuntu_meta["editable"]["repo_setup"]["repos"]['value']
        remote_repos = []
        for repo_value in repos_ubuntu:
            if (self.fuel_web.admin_node_ip not in repo_value['uri']
                    and '{settings.MASTER_IP}' not in repo_value['uri']):
                remote_repos.append({repo_value['name']: repo_value['uri']})
        assert_true(
            not remote_repos,
            "Some repositories weren't replaced with local mirrors: "
            "{0}".format(remote_repos))

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings={
                                                      'sahara': True,
                                                      'murano': True,
                                                      'ceilometer': True,
                                                      'volumes_lvm': True,
                                                      'volumes_ceph': False,
                                                      'images_ceph': True,
                                                      'osd_pool_size': "3"
                                                  })
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['cinder', 'ceph-osd'],
                'slave-04': ['mongo'],
                'slave-05': ['mongo']
            })

        repos_attr = self.get_cluster_repos(cluster_id)
        self.fuel_web.report_repos(repos_attr)

        # (ddmitriev): No additional checks are required after deploy,
        # just make sure that all components are installed from the
        # local mirrors without any dependency errors or missing packages.
        self.fuel_web.deploy_cluster_wait(cluster_id)