Example #1
0
File: utils.py  Project: jvalinas/fuel-qa
def replace_rpm_package(package):
    """Replace <package>.rpm on the master node with the <package>.rpm
    built from the review.

    :param package: base name of the RPM package (e.g. 'fuel-agent')
    :raises FuelQAVariableNotSet: when settings.UPDATE_FUEL is not set
    """
    ssh = SSHManager()
    logger.info("Patching {}".format(package))
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        # Upload the reviewed package to the master node
        target_path = '/var/www/nailgun/{}/'.format(package)
        ssh.upload_to_remote(ip=ssh.admin_ip,
                             source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                             target=target_path)

        # Resolve the full (versioned) file name behind the wildcard
        pkg_path = os.path.join(target_path,
                                '{}{}'.format(package, '*.noarch.rpm'))
        full_package_name = get_full_filename(wildcard_name=pkg_path)
        logger.debug('Package name is {0}'.format(full_package_name))
        full_package_path = os.path.join(os.path.dirname(pkg_path),
                                         full_package_name)

        # Update only when the new package differs from the installed one
        if not does_new_pkg_equal_to_installed_pkg(
                installed_package=package, new_package=full_package_path):
            update_rpm(path=full_package_path)

    except Exception:
        # Fix: the failure may occur at upload OR update time; the old
        # message claimed "upload" unconditionally.  logger.exception also
        # records the traceback before re-raising.
        logger.exception(
            "Could not upload/update package {}".format(package))
        raise
Example #2
0
    def centos_setup_fuel(self, hostname):
        """Install Fuel on a plain CentOS master node and bootstrap it.

        :param hostname: FQDN to assign to the master node
        :raises FuelQAVariableNotSet: when FUEL_RELEASE_PATH is not set
        """
        logger.info("upload fuel-release packet")
        if not settings.FUEL_RELEASE_PATH:
            raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
        try:
            ssh = SSHManager()
            pack_path = '/tmp/'
            full_pack_path = os.path.join(pack_path,
                                          'fuel-release*.noarch.rpm')
            ssh.upload_to_remote(ip=ssh.admin_ip,
                                 source=settings.FUEL_RELEASE_PATH.rstrip('/'),
                                 target=pack_path)

        except Exception:
            logger.exception("Could not upload package")
            # Fix: propagate the failure.  The original swallowed the
            # exception and fell through to the setup steps below, which
            # cannot succeed without the fuel-release package (and `ssh`
            # itself may be unbound if SSHManager() failed).  This also
            # matches the behaviour of install_mos_repos().
            raise

        logger.debug("Update host information")
        cmd = "echo HOSTNAME={} >> /etc/sysconfig/network".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "echo {0} {1} {2} >> /etc/hosts".format(
            ssh.admin_ip, hostname, settings.FUEL_MASTER_HOSTNAME)

        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "hostname {}".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        logger.debug("setup MOS repositories")
        cmd = "rpm -ivh {}".format(full_pack_path)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y fuel-setup"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y screen"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        logger.info("Install Fuel services")

        # Run the bootstrap in a detached screen session;
        # wait_for_external_config=yes pauses it until configs are modified.
        cmd = "screen -dm bash -c 'showmenu=no wait_for_external_config=yes " \
              "bootstrap_admin_node.sh'"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        self.env.wait_for_external_config()
        self.env.admin_actions.modify_configs(self.env.d_env.router())
        self.env.kill_wait_for_external_config()

        self.env.wait_bootstrap()

        logger.debug("Check Fuel services")
        self.env.admin_actions.wait_for_fuel_ready()

        logger.debug("post-installation configuration of Fuel services")
        self.fuel_post_install_actions()
Example #3
0
    def gate_fuel_web(self):
        """
    Scenario:
        1. Revert snapshot "empty"
        2. Apply changes into nailgun
        3. Get release id
        4. Update networks
        5. Bootstrap 3 nodes
        6. Create cluster
        7. Add 1 controller nodes
        8. Add 1 compute node
        9. Add 1 cinder node
        10. Run network verify
        11. Deploy environment
        12. Run network verify
        13. Run OSTF
        """
        # Gate test: deploy a minimal 3-node cluster with fuel-nailgun on
        # the master node replaced by the build from the review under test.
        if not UPDATE_FUEL:
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
        self.show_step(1)
        self.env.revert_snapshot("empty")
        self.show_step(2)
        # Swap fuel-nailgun on the master node for the reviewed build.
        replace_fuel_nailgun_rpm()
        self.show_step(3)
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]
        self.show_step(4)
        self.fuel_web.change_default_network_settings()
        self.show_step(5)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3])
        self.show_step(6)
        # Create the environment via the `fuel` CLI on the master node.
        cmd = ('fuel env create --name={0} --release={1} --nst=tun '
               '--json'.format(self.__class__.__name__, release_id))
        env_result = self.ssh_manager.execute_on_remote(
            self.ssh_manager.admin_ip, cmd=cmd, jsonify=True)['stdout_json']
        cluster_id = env_result['id']

        self.show_step(7)
        self.show_step(8)
        self.show_step(9)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder'],
            })
        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(11)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(13)
        # run only smoke according to sanity and ha ran in deploy_wait()
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])
Example #4
0
File: utils.py  Project: ehles/fuel-qa
def replace_fuel_agent_rpm(environment):
    """Replace fuel_agent*.rpm in the MCollective container (and on the
    master node itself) with the fuel_agent*.rpm from the review.

    environment - Environment Model object - self.env
    """
    logger.info("Patching fuel-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        full_pack_path = os.path.join(pack_path, '*.rpm')
        container = 'mcollective'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'),
                          pack_path)

        # Update fuel-agent in MCollective
        # Record the currently installed package for the log message below.
        cmd = "rpm -q fuel-agent"
        old_package = \
            environment.base_actions.execute_in_container(
                cmd, container, exit_code=0)
        # Name of the uploaded package (rpm -qp on the wildcard path).
        cmd = "rpm -qp {0}".format(full_pack_path)
        new_package = \
            environment.base_actions.execute_in_container(
                cmd, container)
        logger.info("Updating package {0} with {1}"
                    .format(old_package, new_package))

        # --oldpackage also permits installing a lower version than the
        # one currently on the node (review builds may be "older").
        cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)

        # Re-query to confirm the container now reports the new package.
        cmd = "rpm -q fuel-agent"
        installed_package = \
            environment.base_actions.execute_in_container(
                cmd, container, exit_code=0)

        assert_equal(installed_package, new_package,
                     "The new package {0} was not installed".
                     format(new_package))

        # Update fuel-agent on master node
        with environment.d_env.get_admin_remote() as remote:
            cmd = "rpm -Uvh --oldpackage {0}".format(
                full_pack_path)
            result = remote.execute(cmd)
        assert_equal(result['exit_code'], 0,
                     ('Failed to update package {}').format(result))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
    def bootstrap_multipath(self):
        """Bootstrap node with multipath devices

        Scenario:
            1. Setup environment
            2. Bootstrap slave nodes
            3. Verify multipath devices on the nodes

        Duration 30m

        """
        # Guard clauses: all multipath-related settings must be provided.
        if not MULTIPATH:
            raise exceptions.FuelQAVariableNotSet('MULTIPATH', 'true')
        if not MULTIPATH_TEMPLATE:
            raise exceptions.FuelQAVariableNotSet(
                'MULTIPATH_TEMPLATE',
                'system_test/tests_templates/tests_configs/'
                'multipath_3_nodes.yaml')
        if int(SLAVE_MULTIPATH_DISKS_COUNT) < 1:
            raise exceptions.FuelQAVariableNotSet(
                'SLAVE_MULTIPATH_DISKS_COUNT', '2')

        self.show_step(1)
        # Use the multipath-specific devops template for this environment.
        self._devops_config = load_yaml(MULTIPATH_TEMPLATE)
        with TimeStat("setup_environment", is_uniq=True):
            self.env.setup_environment()
            self.fuel_post_install_actions()
            if REPLACE_DEFAULT_REPOS and REPLACE_DEFAULT_REPOS_ONLY_ONCE:
                self.fuel_web.replace_default_repos()
        self.fuel_web.get_nailgun_version()
        self.fuel_web.change_default_network_settings()

        self.show_step(2)
        # NOTE(review): time sync is deliberately skipped here —
        # presumably not needed for a multipath device check; confirm.
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3],
                                 skip_timesync=True)

        self.show_step(3)
        # Verify the expected number of multipath devices on every node.
        for ip in [node['ip'] for node in self.fuel_web.client.list_nodes()]:
            self.check_multipath_devices(ip, SLAVE_MULTIPATH_DISKS_COUNT)
Example #6
0
File: utils.py  Project: jvalinas/fuel-qa
def upload_nailgun_agent_rpm():
    """Upload nailgun_agent.rpm from the review onto the master node
    and extract its contents in place.
    """
    ssh = SSHManager()
    logger.info("Upload nailgun-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    review_dir = '/var/www/nailgun/nailgun-agent-review/'
    ssh.upload_to_remote(ip=ssh.admin_ip,
                         source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                         target=review_dir)
    # Unpack the RPM payload (rpm2cpio | cpio) inside the upload directory.
    extract_cmd = 'cd {0}; rpm2cpio {1} | cpio -idmv'.format(
        review_dir, 'nailgun-agent-*.noarch.rpm ')
    ssh.execute_on_remote(ssh.admin_ip, extract_cmd)
Example #7
0
def install_mos_repos():
    """
    Upload and install fuel-release packet with mos-repo description
    and install necessary packets for packetary Fuel installation
    :return: nothing
    """
    logger.info("upload fuel-release packet")
    if not settings.FUEL_RELEASE_PATH:
        raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
    try:
        ssh = SSHManager()
        pack_path = '/tmp/'
        full_pack_path = os.path.join(pack_path,
                                      'fuel-release*.noarch.rpm')
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_RELEASE_PATH.rstrip('/'),
            target=pack_path)

        # Optionally lay down custom RPM repo definitions on the node.
        if settings.RPM_REPOS_YAML:
            with ssh.open_on_remote(
                    ip=ssh.admin_ip,
                    path='/etc/yum.repos.d/custom.repo',
                    mode="w") as f:
                f.write(generate_yum_repos_config(settings.RPM_REPOS_YAML))

        # Optionally upload the default DEB repo description.
        if settings.DEB_REPOS_YAML:
            # NOTE(review): `ssh` is re-created and `pack_path` re-bound
            # here; both look redundant — confirm and simplify.
            ssh = SSHManager()
            pack_path = "/root/default_deb_repos.yaml"
            ssh.upload_to_remote(
                ip=ssh.admin_ip,
                source=settings.DEB_REPOS_YAML,
                target=pack_path)

    except Exception:
        logger.exception("Could not upload package")
        raise

    logger.debug("setup MOS repositories")
    # full_pack_path still points at /tmp/fuel-release*.noarch.rpm even
    # if pack_path was re-bound in the DEB branch above.
    cmd = "rpm -ivh {}".format(full_pack_path)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "yum install -y fuel-setup"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
Example #8
0
def replace_fuel_agent_rpm(environment):
    """Replace fuel_agent.rpm on the master node with the fuel_agent.rpm
    from the review.

    environment - Environment Model object - self.env
    """
    logger.info("Patching fuel-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        full_pack_path = os.path.join(pack_path, 'fuel-agent*.noarch.rpm')
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'), pack_path)

        # Update fuel-agent on master node
        # Compare the installed package with the uploaded one first.
        cmd = "rpm -q fuel-agent"
        old_package = \
            environment.base_actions.execute(cmd, exit_code=0)
        cmd = "rpm -qp {0}".format(full_pack_path)
        new_package = \
            environment.base_actions.execute(cmd)
        logger.info("Updating package {0} with {1}".format(
            old_package, new_package))

        # Install only when the reviewed build differs from what is
        # already on the node.
        if old_package != new_package:
            logger.info("Updating fuel-agent package on master node")
            logger.info('Try to install package {0}'.format(new_package))
            # --oldpackage also permits installing a lower version.
            cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)
            environment.base_actions.execute(cmd, exit_code=0)

            # Re-query to confirm the reviewed package is now installed.
            cmd = "rpm -q fuel-agent"
            installed_package = \
                environment.base_actions.execute(cmd, exit_code=0)

            assert_equal(
                installed_package, new_package,
                "The new package {0} was not installed".format(new_package))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Example #9
0
def replace_fuel_agent_rpm():
    """Swap the fuel-agent RPM on the master node for the build coming
    from the review.
    """
    ssh = SSHManager()
    logger.info("Patching fuel-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        full_pack_path = os.path.join(pack_path, 'fuel-agent*.noarch.rpm')
        ssh.upload_to_remote(ip=ssh.admin_ip,
                             source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                             target=pack_path)

        # Compare the currently installed package against the upload.
        old_package = ssh.execute_on_remote(
            ssh.admin_ip, "rpm -q fuel-agent")['stdout_str']
        new_package = ssh.execute_on_remote(
            ssh.admin_ip,
            "rpm -qp {0}".format(full_pack_path))['stdout_str']
        logger.info("Updating package {0} with {1}".format(
            old_package, new_package))

        if old_package != new_package:
            logger.info("Updating fuel-agent package on master node")
            logger.info('Try to install package {0}'.format(new_package))
            ssh.execute_on_remote(
                ssh.admin_ip,
                "rpm -Uvh --oldpackage {0}".format(full_pack_path))

            # Confirm the node now reports the reviewed package.
            installed_package = ssh.execute_on_remote(
                ssh.admin_ip, "rpm -q fuel-agent")['stdout_str']

            assert_equal(
                installed_package, new_package,
                "The new package {0} was not installed".format(new_package))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
    def gate_patch_fuel_agent(self):
        """ Revert snapshot, update fuel-agent, bootstrap from review
        and provision one node

    Scenario:
        1. Revert snapshot "ready"
        2. Update fuel-agent, fuel-bootstrap-cli on master node
        3. Update fuel_bootstrap_cli.yaml
        4. Rebuild bootstrap
        5. Bootstrap 5 slaves
        6. Verify Ubuntu bootstrap on slaves
        7. Add 1 node with controller
        8. Add 1 node ironic role
        9. Deploy the cluster
        10. Verify fuel-agent version in ubuntu and ironic-bootstrap
        11. Upload image to glance
        12. Enroll Ironic nodes
        13. Boot nova instance
        14. Check Nova instance status

        Snapshot review_fuel_agent_ironic_deploy
        """
        if not settings.UPDATE_FUEL:
            # Fix: pass the variable *name*; the original passed the value
            # (settings.UPDATE_FUEL), which is falsy on this path and made
            # the error message useless.
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'true')

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready")

        self.show_step(2)
        # Replace both packages on the master node with the review builds.
        replace_rpm_package('fuel-agent')
        replace_rpm_package('fuel-bootstrap-cli')

        self.show_step(3)
        self.update_bootstrap_cli_yaml()

        self.show_step(4)
        # Rebuild, import and activate the bootstrap image so slaves boot
        # with the patched fuel-agent inside.
        uuid, bootstrap_location = \
            self.env.fuel_bootstrap_actions.build_bootstrap_image()
        self.env.fuel_bootstrap_actions. \
            import_bootstrap_image(bootstrap_location)
        self.env.fuel_bootstrap_actions. \
            activate_bootstrap_image(uuid)

        self.show_step(5)
        # NOTE(review): scenario step 5 says "Bootstrap 5 slaves" but only
        # two are bootstrapped here — confirm and fix the docstring.
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:2])

        self.show_step(6)
        for node in self.env.d_env.nodes().slaves[:2]:
            _ip = self.fuel_web.get_nailgun_node_by_devops_node(node)['ip']
            verify_bootstrap_on_node(_ip, os_type="ubuntu", uuid=uuid)

        data = {
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            "ironic": True
        }

        nodes = {'slave-01': ['controller'], 'slave-02': ['ironic']}

        self.show_step(7)
        self.show_step(8)
        self.show_step(9)

        cluster_id = self._deploy_ironic_cluster(settings=data, nodes=nodes)

        ironic_conn = ironic_actions.IronicActions(
            self.fuel_web.get_public_vip(cluster_id))

        self.show_step(10)
        # Check the patched fuel-agent landed in both bootstraps.
        check_package_version_injected_in_bootstraps("fuel-agent")

        check_package_version_injected_in_bootstraps("fuel-agent",
                                                     cluster_id=cluster_id,
                                                     ironic=True)

        self.show_step(11)
        self.show_step(12)
        self._create_os_resources(ironic_conn)

        self.show_step(13)
        self._boot_nova_instances(ironic_conn)

        self.show_step(14)
        ironic_conn.wait_for_vms(ironic_conn)
        ironic_conn.verify_vms_connection(ironic_conn)

        self.env.make_snapshot("review_fuel_agent_ironic_deploy")
Example #11
0
    def basic_env_for_numa_cpu_pinning(self):
        """Basic environment for NUMA CPU pinning

        Scenario:
            1. Create cluster
            2. Add 2 nodes with compute role
            3. Add 3 nodes with controller role
            4. Verify that quantity of NUMA is equal on node and in Fuel

        Snapshot: basic_env_for_numa_cpu_pinning
        """
        snapshot_name = 'basic_env_for_numa_cpu_pinning'
        self.check_run(snapshot_name)
        self.env.revert_snapshot("ready_with_5_slaves")

        # TODO(kdemina) Use commomn function for variables asserts
        # Fix: the interface/ACPI checks were a mix of independent `if`s
        # and an `elif` chain.  Since every branch raises, behaviour is
        # identical either way, but independent guard clauses state the
        # intent (all settings are checked) unambiguously.
        if not settings.KVM_USE:
            raise exceptions.FuelQAVariableNotSet('KVM_USE', 'true')

        if int(settings.HARDWARE['slave_node_cpu']) < 6:
            raise exceptions.FuelQAVariableNotSet('SLAVE_NODE_CPU', 6)

        if int(settings.HARDWARE['numa_nodes']) < 2:
            raise exceptions.FuelQAVariableNotSet('NUMA_NODES', 2)

        if not settings.INTERFACES_DICT['eth0'] == 'ens3':
            raise exceptions.FuelQAVariableNotSet('IFACE_0', 'ens3')

        if not settings.INTERFACES_DICT['eth1'] == 'ens4':
            raise exceptions.FuelQAVariableNotSet('IFACE_1', 'ens4')

        if not settings.INTERFACES_DICT['eth2'] == 'ens5':
            raise exceptions.FuelQAVariableNotSet('IFACE_2', 'ens5')

        if not settings.INTERFACES_DICT['eth3'] == 'ens6':
            raise exceptions.FuelQAVariableNotSet('IFACE_3', 'ens6')

        if not settings.INTERFACES_DICT['eth4'] == 'ens7':
            raise exceptions.FuelQAVariableNotSet('IFACE_4', 'ens7')

        if not settings.INTERFACES_DICT['eth5'] == 'ens8':
            raise exceptions.FuelQAVariableNotSet('IFACE_5', 'ens8')

        if not settings.ACPI_ENABLE:
            raise exceptions.FuelQAVariableNotSet('DRIVER_ENABLE_ACPI', 'true')

        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT_TYPE
            })
        self.show_step(2)
        self.show_step(3)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['compute'],
                'slave-02': ['compute'],
                'slave-03': ['controller'],
                'slave-04': ['controller'],
                'slave-05': ['controller']
            })

        self.show_step(4)

        # Compare NUMA topology reported by Fuel with the one seen on the
        # compute nodes themselves.
        for node in ('slave-01', 'slave-02'):
            target_node = self.fuel_web.get_nailgun_node_by_name(node)
            numas_from_fuel = len(
                target_node['meta']['numa_topology']['numa_nodes'])
            numas_on_remote = utils.get_quantity_of_numa(target_node['ip'])
            if not numas_on_remote:
                # Fuel handle topology without NUMA as 1 NUMA node
                asserts.assert_equal(
                    numas_from_fuel, 1, "No NUMA nodes on {0} "
                    "while Fuel shows it "
                    "has {1}".format(target_node['ip'], numas_from_fuel))
                # NOTE(review): the test fails unconditionally when the node
                # has no NUMA topology, even if Fuel agrees — presumably
                # NUMA is mandatory for the CPU-pinning suite; confirm.
                raise AssertionError("No NUMA nodes on {0}".format(
                    target_node['ip']))
            else:
                asserts.assert_equal(
                    numas_on_remote, numas_from_fuel, "{0} NUMA nodes on {1} "
                    "while Fuel shows it "
                    "has {2}".format(numas_on_remote, target_node['ip'],
                                     numas_from_fuel))
                logger.info("There is {0} NUMA nodes on node {1}".format(
                    numas_on_remote, target_node['ip']))
        self.env.make_snapshot(snapshot_name, is_make=True)
Example #12
0
    def review_fuel_cli_one_node_deploy(self):
        """ Revert snapshot, apply changes from review and deploy
        cluster with controller node only over cli.

        Scenario:
            1. Revert snapshot 'ready_with_1_slave'
            2. Apply changes from review
            3. Bootstrap 1 node
            4. Show  releases list
            5. Create cluster over cli
            6. Update networks
            7. Update SSL settings
            8. List environments
            9. Add and provision 1 node with controller role
            10. Deploy node
            11. Delete cluster

        Duration 20m
        """
        if not UPDATE_FUEL:
            # Fix: pass the variable *name*; the original passed the value
            # (UPDATE_FUEL), which is falsy on this path and made the
            # error message useless.
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'true')
        self.show_step(1, initialize=True)
        # NOTE(review): docstring step 1 says 'ready_with_1_slave' while
        # the snapshot reverted here is 'ready_with_1_slaves' — confirm
        # which name is registered before renaming either.
        self.env.revert_snapshot('ready_with_1_slaves')
        target_path = '/var/www/nailgun/python-fuelclient/'
        package_name = 'python-fuelclient'
        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(2)
            self.upload_package(remote, target_path, package_name)
            self.replace_package(remote,
                                 package_name=package_name,
                                 package_path=target_path)

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:1])

        node_id = [
            self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[0])['id']
        ]

        with self.env.d_env.get_admin_remote() as remote:
            # Fix: dropped a duplicated self.show_step(3) here — step 3
            # was already reported before bootstrapping above.
            # get releases list
            self.show_step(4)
            list_release_cmd = 'fuel release --json'
            list_release_res = run_on_remote(remote,
                                             list_release_cmd,
                                             jsonify=True)
            active_release_id = [
                release['id'] for release in list_release_res
                if release['is_deployable']
            ]
            asserts.assert_true(
                active_release_id, 'Can not find deployable release. '
                'Current release data {0}'.format(list_release_res))

            # Create an environment
            self.show_step(5)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             active_release_id[0]))

            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            cluster_name = env_result['name']

            # Update network parameters
            self.show_step(6)
            self.update_cli_network_configuration(cluster_id, remote)

            # Update SSL configuration
            self.show_step(7)
            self.update_ssl_configuration(cluster_id, remote)

            self.show_step(8)
            cmd = 'fuel env --json'
            env_list_res = run_on_remote(remote, cmd, jsonify=True)
            asserts.assert_true(
                cluster_id in [cluster['id'] for cluster in env_list_res],
                'Can not find created before environment'
                ' id in fuel environment list.')
            asserts.assert_true(
                cluster_name in [cluster['name'] for cluster in env_list_res],
                'Can not find cluster name in fuel env command output')

            # Add and provision a controller node
            self.show_step(9)
            logger.info("Add to the cluster and start provisioning "
                        "a controller node [{0}]".format(node_id[0]))
            cmd = ('fuel --env-id={0} node set --node {1} --role=controller'.
                   format(cluster_id, node_id[0]))
            remote.execute(cmd)
            cmd = (
                'fuel --env-id={0} node --provision --node={1} --json'.format(
                    cluster_id, node_id[0]))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=30 * 60)

            # Deploy the controller node
            self.show_step(10)
            cmd = ('fuel --env-id={0} node --deploy --node {1} --json'.format(
                cluster_id, node_id[0]))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=60 * 60)

        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity'])
        self.show_step(11)
        with self.env.d_env.get_admin_remote() as remote:
            res = remote.execute(
                'fuel --env {0} env delete'.format(cluster_id))
        # Fix: include a diagnostic message instead of a bare assertion.
        asserts.assert_true(
            res['exit_code'] == 0,
            'Failed to delete environment {0}: {1}'.format(cluster_id, res))

        with self.env.d_env.get_admin_remote() as remote:
            try:
                # Poll until `fuel env` no longer lists a single-character
                # env id (i.e. the cluster disappeared from the listing).
                wait(lambda: remote.execute("fuel env |  awk '{print $1}'"
                                            " |  tail -n 1 | grep '^.$'")[
                                                'exit_code'] == 1,
                     timeout=60 * 10)
            except TimeoutError:
                raise TimeoutError(
                    "cluster {0} was not deleted".format(cluster_id))

        self.env.make_snapshot("review_fuel_cli_one_node_deploy")
Example #13
0
def replace_fuel_nailgun_rpm(environment):
    """Replace the fuel-nailgun RPM on the master node with the build
    from the review, then re-apply the nailgun puppet manifest.

    environment - Environment Model object - self.env
    """
    logger.info("Patching fuel-nailgun")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    pack_path = '/var/www/nailgun/fuel-nailgun/'

    full_pack_path = os.path.join(pack_path, 'fuel-nailgun*.noarch.rpm')
    logger.info('Package path {0}'.format(full_pack_path))
    with environment.d_env.get_admin_remote() as remote:
        remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'), pack_path)

    # Check old fuel-nailgun package
    cmd = "rpm -q fuel-nailgun"

    old_package = environment.base_actions.execute(cmd, exit_code=0)
    logger.info('Current package version of '
                'fuel-nailgun: {0}'.format(old_package))

    cmd = "rpm -qp {0}".format(full_pack_path)
    new_package = environment.base_actions.execute(cmd)
    logger.info("Updating package {0} with {1}".format(old_package,
                                                       new_package))

    if old_package == new_package:
        logger.debug('Looks like package from review '
                     'was installed during setups of master node')
        return

    # Stop nailgun services before dropping the DB.
    # (Idiom fix: plain for-loops instead of side-effect-only list
    # comprehensions, matching the SSH-based variant of this helper.)
    service_list = ['assassind', 'receiverd', 'nailgun', 'statsenderd']
    for service in service_list:
        environment.base_actions.execute(
            'systemctl stop {0}'.format(service), exit_code=0)

    # stop statistic services
    for service in get_oswl_services_names(environment):
        environment.base_actions.execute(
            'systemctl stop {0}'.format(service), exit_code=0)

    # Drop nailgun db manage.py dropdb
    cmd = 'manage.py dropdb'
    environment.base_actions.execute(cmd, exit_code=0)

    # Delete package
    logger.info("Delete package {0}".format(old_package))
    cmd = "rpm -e fuel-nailgun"
    environment.base_actions.execute(cmd, exit_code=0)

    logger.info("Install package {0}".format(new_package))

    cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)

    environment.base_actions.execute(cmd, exit_code=0)

    # Confirm the reviewed package is now installed.
    cmd = "rpm -q fuel-nailgun"
    installed_package = environment.base_actions.execute(cmd, exit_code=0)

    assert_equal(installed_package, new_package,
                 "The new package {0} was not installed".format(new_package))

    # Re-apply the nailgun manifest and re-sync deployment tasks so the
    # freshly installed package is configured and active.
    cmd = ('puppet apply --debug '
           '/etc/puppet/modules/fuel/examples/nailgun.pp')
    environment.base_actions.execute(cmd, exit_code=0)
    with environment.d_env.get_admin_remote() as remote:
        res = remote.execute(
            "fuel release --sync-deployment-tasks --dir /etc/puppet/")
        assert_equal(res['exit_code'], 0,
                     'Failed to sync tasks with result {0}'.format(res))
Example #14
0
File: utils.py  Project: jvalinas/fuel-qa
def replace_fuel_nailgun_rpm():
    """Replace the fuel-nailgun RPM on the master node with the package
    built from review.

    Uploads the package from ``settings.UPDATE_FUEL_PATH``, compares it with
    the installed one (no-op if identical), stops nailgun and statistic
    services, drops the nailgun DB, swaps the RPM, re-applies the nailgun
    puppet manifest and re-syncs deployment tasks.

    :raises exceptions.FuelQAVariableNotSet: if UPDATE_FUEL is not set.
    """
    logger.info("Patching fuel-nailgun")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    pack_path = '/var/www/nailgun/fuel-nailgun/'

    full_pack_path = os.path.join(pack_path, 'fuel-nailgun*.noarch.rpm')
    logger.info('Package path {0}'.format(full_pack_path))
    ssh.upload_to_remote(ip=ssh.admin_ip,
                         source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                         target=pack_path)

    # Check old fuel-nailgun package
    cmd = "rpm -q fuel-nailgun"
    old_package = ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)['stdout_str']
    logger.info('Current package version of '
                'fuel-nailgun: {0}'.format(old_package))

    # Query the version of the uploaded package file (rpm -qp reads the
    # file itself, not the rpm database).
    cmd = "rpm -qp {0}".format(full_pack_path)
    new_package = ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)['stdout_str']
    logger.info("Updating package {0} with {1}".format(old_package,
                                                       new_package))

    if old_package == new_package:
        logger.debug('Looks like package from review '
                     'was installed during setups of master node')
        return

    # Stop nailgun and statistic services before touching the DB/package.
    # Cache get_oswl_services_names() so it is queried only once instead of
    # once for logging and once for the stop loop.
    oswl_services = get_oswl_services_names()
    service_list = ['assassind', 'receiverd', 'nailgun', 'statsenderd']
    for service in service_list:
        ssh.execute_on_remote(ip=ssh.admin_ip,
                              cmd='systemctl stop {0}'.format(service))
    logger.info('statistic services {0}'.format(oswl_services))
    # stop statistic services
    for service in oswl_services:
        ssh.execute_on_remote(ip=ssh.admin_ip,
                              cmd='systemctl stop {0}'.format(service))

    # Drop nailgun db manage.py dropdb
    cmd = 'manage.py dropdb'
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Delete package
    logger.info("Delete package {0}".format(old_package))
    cmd = "rpm -e fuel-nailgun"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    logger.info("Install package {0}".format(new_package))

    cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)

    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Verify the expected version is now installed.
    cmd = "rpm -q fuel-nailgun"
    installed_package = ssh.execute_on_remote(ip=ssh.admin_ip,
                                              cmd=cmd)['stdout_str']

    assert_equal(installed_package, new_package,
                 "The new package {0} was not installed".format(new_package))

    # Re-apply the nailgun manifest and re-sync deployment tasks so the new
    # package's puppet/task definitions take effect.
    cmd = ('puppet apply --debug '
           '/etc/puppet/modules/fuel/examples/nailgun.pp')
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    cmd_sync = 'fuel release --sync-deployment-tasks --dir /etc/puppet/'
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd_sync)
示例#15
0
    def review_fuel_cli_one_node_deploy(self):
        """ Revert snapshot, apply changes from review and deploy
        cluster with controller node only over cli.

        Scenario:
            1. Revert snapshot 'ready_with_1_slave'
            2. Apply changes from review
            3. Bootstrap 1 node
            4. Show  releases list
            5. Create cluster over cli
            6. Update networks
            7. Update SSL settings
            8. List environments
            9. Add and provision 1 node with controller role
            10. Delete cluster

        Duration 20m
        """
        if not UPDATE_FUEL:
            # Pass the variable *name* as a string: UPDATE_FUEL itself is
            # falsy here, so passing the value would produce a useless
            # error message.
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'true')
        self.show_step(1, initialize=True)
        self.env.revert_snapshot('ready_with_1_slaves')
        target_path = '/var/www/nailgun/python-fuelclient/'
        package_name = 'python-fuelclient'
        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(2)
            self.upload_package(remote, target_path, package_name)
            self.replace_package(remote, package_name=package_name,
                                 package_path=target_path)

        self.show_step(3)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[:1])

        node_id = [self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0])['id']]

        with self.env.d_env.get_admin_remote() as remote:
            # get releases list
            self.show_step(4)
            list_release_cmd = 'fuel release --json'
            list_release_res = remote.check_call(list_release_cmd).stdout_json
            # Keep only deployable releases matching the configured
            # OpenStack release name.
            active_releases = [
                release for release
                in list_release_res if release['is_deployable'] and
                OPENSTACK_RELEASE.lower() in release['name'].lower()]

            active_release_id = [release['id'] for release in active_releases]
            asserts.assert_true(
                active_release_id, 'Can not find deployable release. '
                'Current release data {0}'.format(list_release_res))

            logger.info('Available for deploy: \n{!s}'.format(
                '\n'.join(
                    ['\tID:      {id}\n'
                     '\tSTATE:   {state}\n'
                     '\tNAME:    {name}\n'
                     '\tVERSION: {version}\n'
                     '\tOS:      {operating_system}\n'.format(**release) for
                     release in active_releases]))
            )

            # Create an environment
            self.show_step(5)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             active_release_id[0]))

            env_result = remote.check_call(cmd).stdout_json
            cluster_id = env_result['id']
            cluster_name = env_result['name']

            # Update network parameters
            self.show_step(6)
            self.update_cli_network_configuration(cluster_id)

            # Update SSL configuration
            self.show_step(7)
            self.update_ssl_configuration(cluster_id)

            self.show_step(8)
            cmd = 'fuel env --json'
            env_list_res = remote.check_call(cmd).stdout_json
            asserts.assert_true(
                cluster_id in [cluster['id'] for cluster in env_list_res],
                'Can not find created before environment'
                ' id in fuel environment list.')
            asserts.assert_true(
                cluster_name in [cluster['name'] for cluster in env_list_res],
                'Can not find cluster name in fuel env command output')

            # Add and provision a controller node
            self.show_step(9)
            logger.info("Add to the cluster and start provisioning "
                        "a controller node [{0}]".format(node_id[0]))
            cmd = ('fuel --env-id={0} node set --node {1} --role=controller'
                   .format(cluster_id, node_id[0]))
            # NOTE(review): exit code is deliberately not checked here; the
            # provision step below fails if the role was not assigned.
            remote.execute(cmd)
            self.update_node_interfaces(node_id[0])
            cmd = ('fuel --env-id={0} node --provision --node={1} --json'
                   .format(cluster_id, node_id[0]))
            task = remote.check_call(cmd).stdout_json
            self.assert_cli_task_success(task, timeout=30 * 60)

            self.show_step(10)
            remote.check_call(
                'fuel --env {0} env delete --force'.format(cluster_id))

            # Poll until `fuel env` no longer lists any cluster id.
            wait(lambda:
                 remote.execute("fuel env |  awk '{print $1}'"
                                " |  tail -n 1 | grep '^.$'")
                 ['exit_code'] == 1, timeout=60 * 10,
                 timeout_msg='cluster {0} was not deleted'.format(cluster_id))

        self.env.make_snapshot("review_fuel_cli_one_node_deploy")
示例#16
0
    def separate_haproxy(self):
        """Deploy HA environment with separate Haproxy.

        Scenario:
            1. Revert snapshot with ready master node
            2. Copy and install external-lb and detach-haproxy plugins
            3. Bootstrap 3 slaves from default nodegroup
            4. Create cluster with Neutron VXLAN and custom nodegroups
            5. Run 'fuel-mirror' to replace cluster repositories
               with local mirrors
            6. Bootstrap 2 slaves nodes from second nodegroup
               and one node from third node group
            7. Enable plugins for cluster
            8. Add 2 controllers from default nodegroup and 1 controller
               from second node group
            9. Add 1 compute+cinder from default node group
               and 1 compute+cinder from second node group
            10. Add haproxy node from third node group
            11. Verify networks
            12. Deploy cluster

        Duration 120m
        Snapshot separate_haproxy
        """

        if not MULTIPLE_NETWORKS:
            raise exceptions.FuelQAVariableNotSet('MULTIPLE_NETWORKS', 'true')

        self.show_step(1)
        self.env.revert_snapshot('ready')

        self.show_step(2)
        # Both plugins go through the same upload-then-install sequence;
        # upload both first (as before), then install both.
        plugin_paths = (SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
                        SEPARATE_SERVICE_BALANCER_PLUGIN_PATH)
        for plugin_path in plugin_paths:
            utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                                 tar_path=plugin_path,
                                 tar_target="/var")
        for plugin_path in plugin_paths:
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin_path))

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
        self.show_step(4)
        admin_ip = self.ssh_manager.admin_ip
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings={
                                                      'net_provider':
                                                      NEUTRON,
                                                      'net_segment_type':
                                                      NEUTRON_SEGMENT['tun'],
                                                      'tenant':
                                                      'separatehaproxy',
                                                      'user':
                                                      '******',
                                                      'password':
                                                      '******',
                                                      'ntp_list': [admin_ip],
                                                  })
        self.show_step(5)
        # Point fuel-mirror at the configured Ubuntu mirror before building
        # the local mirror, if one is configured.
        if MIRROR_UBUNTU != '':
            ubuntu_url = MIRROR_UBUNTU.split()[1]
            replace_cmd = \
                "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \
                " /usr/share/fuel-mirror/ubuntu.yaml".format(
                    ubuntu_url)
            self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=replace_cmd)
        create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_mirror_cmd)
        apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \
                           '--env {0} --replace'.format(cluster_id)
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_mirror_cmd)

        self.show_step(6)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

        self.show_step(7)
        # Enable each plugin: verify it exists for the cluster, then flip
        # its 'enabled' flag.
        for plugin_name in ('detach_haproxy', 'external_loadbalancer'):
            msg = ("Plugin couldn't be enabled. Check plugin version. "
                   "Test aborted")
            asserts.assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            options = {'metadata/enabled': True}
            self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        nodegroup3 = NODEGROUPS[2]['name']

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['controller'], nodegroup2],
                'slave-03': [['compute', 'cinder'], nodegroup1],
                'slave-07': [['standalone-haproxy'], nodegroup3]
            })

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        # check_services=False: the haproxy-detached topology is not covered
        # by the default service checks.
        self.fuel_web.deploy_cluster_wait(cluster_id,
                                          timeout=180 * 60,
                                          check_services=False)

        self.env.make_snapshot('separate_haproxy')
示例#17
0
文件: utils.py 项目: ehles/fuel-qa
def replace_fuel_nailgun_rpm(environment):
    """Replace fuel_nailgun*.rpm in the nailgun container from review.

    :param environment: Environment Model object - self.env

    Uploads the package from ``settings.UPDATE_FUEL_PATH``, stops nailgun
    services in the 'nailgun' container, drops the nailgun DB, reinstalls
    the package, re-applies the puppet manifest and re-syncs deployment
    tasks.

    :raises exceptions.FuelQAVariableNotSet: if UPDATE_FUEL is not set.
    """
    logger.info("Patching fuel-nailgun")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-nailgun/'
        container = 'nailgun'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'),
                          pack_path)
        # stop services
        service_list = ['assassind', 'receiverd',
                        'nailgun', 'oswl_*', 'statsenderd']
        for service in service_list:
            environment.base_actions.execute_in_container(
                'systemctl stop {0}'.format(service),
                container, exit_code=0)

        # Update fuel-nailgun in nailgun
        cmd = "rpm -q fuel-nailgun"
        try:
            old_package = \
                environment.base_actions.execute_in_container(
                    cmd, container, exit_code=0)
            logger.info("Delete package {0}"
                        .format(old_package))
        except AssertionError as err:
            # Bug fix: the original code read ``AssertionError.message`` on
            # the exception *class* (never set) and re-raised a blank
            # AssertionError, losing the failure details. Inspect the caught
            # instance and re-raise it unchanged instead.
            if 'fuel-nailgun is not installed' in str(err):
                old_package = None
            else:
                raise
        # Drop nailgun db manage.py dropdb
        cmd = 'manage.py dropdb'
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)

        cmd = "rpm -e fuel-nailgun"
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)

        # Derive the new package name from the uploaded file name.
        cmd = "ls -1 {0}|grep 'fuel-nailgun'".format(pack_path)
        new_package = \
            environment.base_actions.execute_in_container(
                cmd, container).rstrip('.rpm')
        logger.info("Install package {0}"
                    .format(new_package))

        cmd = "yum localinstall -y {0}fuel-nailgun*.rpm".format(
            pack_path)
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)

        # Only verify the version when there was a previous package to
        # compare against.
        cmd = "rpm -q fuel-nailgun"
        installed_package = \
            environment.base_actions.execute_in_container(
                cmd, container, exit_code=0)
        if old_package:
            assert_equal(installed_package, new_package,
                         "The new package {0} was not installed".
                         format(new_package))

        # Re-apply the nailgun manifest and re-sync deployment tasks.
        cmd = ('puppet apply --debug'
               ' /etc/puppet/modules/nailgun/examples/nailgun-only.pp')
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)
        with environment.d_env.get_admin_remote() as remote:
            res = remote.execute("fuel release --sync-deployment-tasks"
                                 " --dir /etc/puppet/")
            assert_equal(res['exit_code'], 0,
                         'Failed to sync tasks with result {0}'.format(res))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
示例#18
0
    def basic_env_for_hugepages(self):
        """Basic environment for hugepages

        Scenario:
            1. Create cluster
            2. Add 4 compute nodes (one with cinder) and 1 controller node
            3. Check what type of HugePages do support 2M and 1GB
            4. Verify the same HP size is present in CLI
            5. Download attributes for computes and check HP size

        Snapshot: basic_env_for_hugepages

        """
        snapshot_name = 'basic_env_for_hugepages'
        self.check_run(snapshot_name)
        self.env.revert_snapshot("ready_with_5_slaves")

        # Hugepages need KVM, ACPI and a fixed hardware profile; fail fast
        # with the exact variable the user must set.
        if not settings.KVM_USE:
            raise exceptions.FuelQAVariableNotSet('KVM_USE', 'true')

        if not DRIVER_PARAMETERS['enable_acpi']:
            raise exceptions.FuelQAVariableNotSet('DRIVER_ENABLE_ACPI', 'true')

        if settings.HARDWARE['numa_nodes'] != 2:
            raise exceptions.FuelQAVariableNotSet('NUMA_NODES', 2)

        if settings.HARDWARE['slave_node_cpu'] != 4:
            raise exceptions.FuelQAVariableNotSet('SLAVE_NODE_CPU', 4)

        if settings.HARDWARE['slave_node_memory'] != 5120:
            raise exceptions.FuelQAVariableNotSet('SLAVE_NODE_MEMORY', 5120)

        # eth0..eth5 must be mapped to the predictable names ens3..ens8;
        # one loop replaces six copy-pasted checks.
        for idx in range(6):
            expected_iface = 'ens{0}'.format(idx + 3)
            if not settings.INTERFACES_DICT['eth{0}'.format(idx)] == \
                    expected_iface:
                raise exceptions.FuelQAVariableNotSet(
                    'IFACE_{0}'.format(idx), expected_iface)

        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT_TYPE
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['compute'],
                'slave-02': ['compute'],
                'slave-03': ['compute'],
                'slave-04': ['compute', 'cinder'],
                'slave-05': ['controller']
            })

        self.show_step(3)
        computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'], role_status="pending_roles")
        for compute in computes:
            # 'pse' CPU flag => 2MB pages; 'pdpe1gb' => 1GB pages.
            self.ssh_manager.execute_on_remote(
                ip=compute['ip'],
                cmd="grep \"pse\" /proc/cpuinfo",
                err_msg="{} compute doesn't support 2Mb HugePages".format(
                    compute['fqdn']))

            self.ssh_manager.execute_on_remote(
                ip=compute['ip'],
                cmd="grep \"pdpe1gb\" /proc/cpuinfo",
                err_msg="{} compute doesn't support 1GB HugePages".format(
                    compute['fqdn']))

        self.show_step(4)
        for compute in computes:
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd="fuel2 node show {0} | grep hugepages | "
                "grep 2048".format(compute['id']),
                err_msg="2Mb HugePages doesn't present in CLI for node "
                "{0}".format(compute['fqdn']))
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd="fuel2 node show {0} | grep hugepages | "
                "grep 1048576".format(compute['id']),
                err_msg="1Gb HugePages doesn't present in CLI for node "
                "{0}".format(compute['fqdn']))

        self.show_step(5)
        # Freshly added nodes must start with zero hugepages configured.
        for compute in computes:
            config = self.fuel_web.client.get_node_attributes(compute['id'])
            asserts.assert_true(
                config['hugepages']['nova']['value']['2048'] == 0,
                "Number of 2Mb HugePages for node {} is not "
                "0".format(compute['fqdn']))
            asserts.assert_true(
                config['hugepages']['nova']['value']['1048576'] == 0,
                "Number of 1Gb HugePages for node {} is not "
                "0".format(compute['fqdn']))

        self.env.make_snapshot(snapshot_name, is_make=True)