Example #1
    def prepare_ubuntu_bootstrap(self):
        """Verify than slaves retrieved ubuntu bootstrap instead CentOS

        Scenario:
            1. Revert snapshot ready
            2. Choose Ubuntu bootstrap on master node
            3. Bootstrap slaves
            4. Verify bootstrap on slaves

        Duration 15m
        Snapshot: prepare_ubuntu_bootstrap
        """

        self.env.revert_snapshot("ready")

        # Run script on master node to change bootstrap to Ubuntu
        with self.env.d_env.get_admin_remote() as remote:

            cmd = 'fuel-bootstrap-image-set ubuntu'
            run_on_remote(remote, cmd)

            # Should be removed after Bug #1482242 is fixed
            cmd = 'dockerctl shell cobbler service dnsmasq restart'
            run_on_remote(remote, cmd)

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[:3])

        # Verify version of bootstrap on slaves
        self.verify_bootstrap_on_slaves(self.env.d_env.nodes().slaves[:3])

        self.env.make_snapshot("prepare_ubuntu_bootstrap", is_make=True)
Example #2
 def download_settings(self, cluster_id, remote):
     cmd = ('fuel --env {0} settings --download --dir /tmp --json'.format(
         cluster_id))
     run_on_remote(remote, cmd)
     return run_on_remote(remote,
                          'cd /tmp && cat settings_{0}.json'.format(
                              cluster_id), jsonify=True)
Example #3
 def download_settings(self, cluster_id, remote):
     cmd = ('fuel --env {0} settings --download --dir /tmp --json'.format(
         cluster_id))
     run_on_remote(remote, cmd)
     return run_on_remote(remote,
                          'cd /tmp && cat settings_{0}.json'.format(
                              cluster_id), jsonify=True)
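Example #4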
    def do_backup(self,
                  backup_path, local_path,
                  repos_backup_path=None, repos_local_path=None):
        """ Wrapper for backup process of upgrading procedure"""
        # BOTH repos arguments should be passed at the same time
        # or BOTH should not be passed
        assert_equal(bool(repos_backup_path), bool(repos_local_path),
                     "Both repos arguments should be specified")
        self.install_octane()

        cmd = "mkdir -p {}".format(self.remote_dir_for_backups)
        run_on_remote(self.admin_remote, cmd)

        self.octane_action("backup", backup_path)
        logger.info("Downloading {}".format(backup_path))
        # pylint: disable=no-member
        self.admin_remote.download(backup_path, local_path)
        # pylint: enable=no-member
        assert_true(os.path.exists(local_path))

        if repos_backup_path:
            self.octane_action("repo-backup", repos_backup_path)
            logger.info("Downloading {}".format(repos_backup_path))
            # pylint: disable=no-member
            self.admin_remote.download(repos_backup_path, repos_local_path)
            # pylint: enable=no-member
            assert_true(os.path.exists(repos_local_path))
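The assert_equal(bool(...), bool(...)) guard above enforces that the two optional repos arguments are passed together or not at all. The same both-or-neither check in isolation:

def check_paired_args(repos_backup_path=None, repos_local_path=None):
    # bool() maps None and '' to False, so comparing the two truth
    # values means "both provided or both omitted".
    if bool(repos_backup_path) != bool(repos_local_path):
        raise ValueError("Both repos arguments should be specified")

check_paired_args()                            # OK: neither given
check_paired_args('repos.tar.gz', '/tmp/r')    # OK: both given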
Example #5
    def check_instance_connectivity(remote, dhcp_namespace, instance_ip,
                                    instance_keypair):
        cmd_check_ns = 'ip netns list'
        namespaces = [l.strip() for l in run_on_remote(remote, cmd_check_ns)]
        logger.debug('Net namespaces on remote: {0}.'.format(namespaces))
        assert_true(
            dhcp_namespace in namespaces,
            "Network namespace '{0}' doesn't exist on "
            "remote slave!".format(dhcp_namespace))
        instance_key_path = '/root/.ssh/instancekey_rsa'
        run_on_remote(
            remote, 'echo "{0}" > {1} && chmod 400 {1}'.format(
                instance_keypair.private_key, instance_key_path))

        cmd = (". openrc; ip netns exec {0} ssh -i {1}"
               " -o 'StrictHostKeyChecking no'"
               " cirros@{2} \"ping -c 1 {3}\"").format(dhcp_namespace,
                                                       instance_key_path,
                                                       instance_ip,
                                                       settings.PUBLIC_TEST_IP)
        err_msg = ("SSH command:\n{command}\nwas not completed with "
                   "exit code 0 after 3 attempts with 1 minute timeout.")
        wait(lambda: remote.execute(cmd)['exit_code'] == 0,
             interval=60,
             timeout=3 * 60,
             timeout_msg=err_msg.format(command=cmd))
        res = remote.execute(cmd)
        assert_equal(
            0, res['exit_code'],
            'Instance has no connectivity, exit code {0}, '
            'stdout {1}, stderr {2}'.format(res['exit_code'], res['stdout'],
                                            res['stderr']))
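The wait(...) call polls the SSH command until it exits 0 or the timeout expires; only then is the command run once more for the final assertion. A minimal polling helper with the same interval/timeout semantics, using only the standard library (the real wait comes from devops.helpers and may behave slightly differently):

import time

def wait_sketch(predicate, interval=60, timeout=180, timeout_msg='Timed out'):
    # Call `predicate` every `interval` seconds until it returns True,
    # raising AssertionError with `timeout_msg` once `timeout` elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise AssertionError(timeout_msg)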
Example #6
    def prepare_ubuntu_bootstrap(self):
        """Verify than slaves retrieved ubuntu bootstrap instead CentOS

        Scenario:
            1. Revert snapshot ready
            2. Choose Ubuntu bootstrap on master node
            3. Bootstrap slaves
            4. Verify bootstrap on slaves

        Duration 15m
        Snapshot: prepare_ubuntu_bootstrap
        """

        self.env.revert_snapshot("ready")

        # Run script on master node to change bootstrap to Ubuntu
        with self.env.d_env.get_admin_remote() as remote:

            cmd = 'fuel-bootstrap-image-set ubuntu'
            run_on_remote(remote, cmd)

            # Should be removed after Bug #1482242 is fixed
            cmd = 'dockerctl shell cobbler service dnsmasq restart'
            run_on_remote(remote, cmd)

        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3])

        # Verify version of bootstrap on slaves
        self.verify_bootstrap_on_slaves(self.env.d_env.nodes().slaves[:3])

        self.env.make_snapshot("prepare_ubuntu_bootstrap", is_make=True)
Example #7
    def check_instance_connectivity(cls, remote, dhcp_namespace, instance_ip,
                                    instance_keypair):
        cmd_check_ns = 'ip netns list'
        namespaces = [l.strip() for l in run_on_remote(remote, cmd_check_ns)]
        logger.debug('Net namespaces on remote: {0}.'.format(namespaces))
        assert_true(dhcp_namespace in namespaces,
                    "Network namespace '{0}' doesn't exist on "
                    "remote slave!".format(dhcp_namespace))
        instance_key_path = '/root/.ssh/instancekey_rsa'
        run_on_remote(remote, 'echo "{0}" > {1} && chmod 400 {1}'.format(
            instance_keypair.private_key, instance_key_path))

        cmd = (". openrc; ip netns exec {0} ssh -i {1}"
               " -o 'StrictHostKeyChecking no'"
               " cirros@{2} \"ping -c 1 {3}\"").format(dhcp_namespace,
                                                       instance_key_path,
                                                       instance_ip,
                                                       settings.PUBLIC_TEST_IP)
        wait(lambda: remote.execute(cmd)['exit_code'] == 0, timeout=2 * 60)
        res = remote.execute(cmd)
        assert_equal(0, res['exit_code'],
                     'Instance has no connectivity, exit code {0}, '
                     'stdout {1}, stderr {2}'.format(res['exit_code'],
                                                     res['stdout'],
                                                     res['stderr']))
Example #8
 def upload_settings(self, cluster_id, remote, settings):
     data = json.dumps(settings)
     cmd = 'cd /tmp && echo {data} > settings_{id}.json'.format(
         data=json.dumps(data), id=cluster_id)
     run_on_remote(remote, cmd)
     cmd = ('fuel --env {0} settings --upload --dir /tmp --json'.format(
         cluster_id))
     run_on_remote(remote, cmd)
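Note the double json.dumps in upload_settings: the first call serializes the settings dict, and the second wraps the resulting string in escaped double quotes so the remote shell's echo writes valid JSON to the file. A small self-contained illustration (the file name and settings values are made up):

import json

cluster_settings = {'editable': {'storage': 'ceph osd'}}
data = json.dumps(cluster_settings)  # '{"editable": {"storage": "ceph osd"}}'
quoted = json.dumps(data)            # '"{\"editable\": {\"storage\": \"ceph osd\"}}"'
cmd = 'cd /tmp && echo {0} > settings_42.json'.format(quoted)
# Inside double quotes the shell turns each \" back into a literal ",
# so the file ends up containing the original JSON document.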
Example #9
 def add_nodes_to_cluster(self, remote, cluster_id, node_ids, roles):
     if isinstance(node_ids, int):
         node_ids_str = str(node_ids)
     else:
         node_ids_str = ','.join(str(n) for n in node_ids)
     cmd = ('fuel --env-id={0} node set --node {1} --role={2}'.format(
         cluster_id, node_ids_str, ','.join(roles)))
     run_on_remote(remote, cmd)
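add_nodes_to_cluster accepts either a single node id or an iterable of ids; both end up as a comma-separated --node argument. The normalization step on its own:

def to_node_ids_str(node_ids):
    # A bare int becomes '1'; any iterable becomes '2,3'.
    if isinstance(node_ids, int):
        return str(node_ids)
    return ','.join(str(n) for n in node_ids)

assert to_node_ids_str(1) == '1'
assert to_node_ids_str([2, 3]) == '2,3'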
Example #10
    def cli_deploy_neutron_tun(self):
        """Deploy neutron_tun cluster using Fuel CLI

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 40m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        node_ids = [
            self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)
        ]
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(1, initialize=True)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             release_id))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']

            self.update_cli_network_configuration(cluster_id, remote)

            self.update_ssl_configuration(cluster_id, remote)
            self.show_step(2)
            self.show_step(3)
            self.show_step(4)
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[0],
                                      ['controller'])
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[1],
                                      ['compute'])
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[2],
                                      ['cinder'])

            self.fuel_web.verify_network(cluster_id)
            self.show_step(5)
            cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=130 * 60)

            self.show_step(6)
            self.fuel_web.verify_network(cluster_id)

            self.show_step(7)
            self.fuel_web.run_ostf(cluster_id=cluster_id,
                                   test_sets=['ha', 'smoke', 'sanity'],
                                   should_fail=1)
Example #11
 def update_network(self, cluster_id, remote, net_config):
     net_file = self.get_network_filename(cluster_id, remote)
     data = json.dumps(net_config)
     cmd = 'echo {data} > {net_file}'.format(data=json.dumps(data),
                                             net_file=net_file)
     run_on_remote(remote, cmd)
     cmd = ('cd /tmp; fuel --env {0} network --upload --json'.format(
         cluster_id))
     run_on_remote(remote, cmd)
Example #12
 def upload_settings(self, cluster_id, remote, settings):
     data = json.dumps(settings)
     cmd = 'cd /tmp && echo {data} > settings_{id}.json'.format(
         data=json.dumps(data),
         id=cluster_id)
     run_on_remote(remote, cmd)
     cmd = ('fuel --env {0} settings --upload --dir /tmp --json'.format(
         cluster_id))
     run_on_remote(remote, cmd)
Example #13
 def add_nodes_to_cluster(
         self, remote, cluster_id, node_ids, roles):
     if isinstance(node_ids, int):
         node_ids_str = str(node_ids)
     else:
         node_ids_str = ','.join(str(n) for n in node_ids)
     cmd = ('fuel --env-id={0} node set --node {1} --role={2}'.format(
         cluster_id, node_ids_str, ','.join(roles)))
     run_on_remote(remote, cmd)
Example #14
 def update_network(self, cluster_id, remote, net_config):
     net_file = self.get_network_filename(cluster_id, remote)
     data = json.dumps(net_config)
     cmd = 'echo {data} > {net_file}'.format(data=json.dumps(data),
                                             net_file=net_file)
     run_on_remote(remote, cmd)
     cmd = ('cd /tmp; fuel --env {0} network --upload --json'
            .format(cluster_id))
     run_on_remote(remote, cmd)
Example #15
    def cli_deploy_tasks(self):
        """Deploy neutron_tun cluster using Fuel CLI

        Scenario:
            1. Create new environment
            2. Add 3 nodes with controller role
            3. Provision 3 controllers
            4. Start netconfig on second controller
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 50m
        """
        self.env.revert_snapshot("ready_with_3_slaves")
        node_ids = [
            self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)
        ]

        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(1)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=vlan --json'.format(self.__class__.__name__,
                                              release_id))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            self.show_step(2)
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[0:3],
                                      ['controller'])
            self.show_step(3)
            cmd = (
                'fuel node --node-id {0} --provision --env {1} --json'.format(
                    ','.join(str(n) for n in node_ids), cluster_id))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=20 * 60)
            self.show_step(4)
            cmd = (
                'fuel node --node {0} --end netconfig --env {1} --json'.format(
                    node_ids[1], cluster_id))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=30 * 60)
            self.show_step(5)
            cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=130 * 60)
            self.show_step(6)
            self.fuel_web.verify_network(cluster_id)
            self.show_step(7)
            self.fuel_web.run_ostf(cluster_id=cluster_id,
                                   test_sets=['ha', 'smoke', 'sanity'],
                                   should_fail=1)
Example #16
    def cli_deploy_neutron_tun(self):
        """Deploy neutron_tun cluster using Fuel CLI

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 40m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        node_ids = [self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)]
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(1, initialize=True)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             release_id))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']

            self.update_cli_network_configuration(cluster_id, remote)

            self.update_ssl_configuration(cluster_id, remote)
            self.show_step(2)
            self.show_step(3)
            self.show_step(4)
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[0],
                                      ['controller'])
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[1],
                                      ['compute'])
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[2],
                                      ['cinder'])

            self.fuel_web.verify_network(cluster_id)
            self.show_step(5)
            cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=130 * 60)

            self.show_step(6)
            self.fuel_web.verify_network(cluster_id)

            self.show_step(7)
            self.fuel_web.run_ostf(
                cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
                should_fail=1)
Example #17
 def prepare_mirrors_environment(self):
     # TODO(akostrikov) Create the same Dockerfile for centos 6.5?
     # TODO(akostrikov) Test yum.
     snapshot_name = 'prepare_mirrors_environment'
     self.check_run(snapshot_name)
     self.env.revert_snapshot('empty')
     logger.info('Prepare environment for mirror checks.')
     with self.env.d_env.get_admin_remote() as remote:
         run_on_remote(remote, 'docker pull ubuntu')
         run_on_remote(remote, 'docker pull nginx')
     # TODO(akostrikov) add check that images are present.
     self.env.make_snapshot(snapshot_name, is_make=True)
Example #18
    def cli_deploy_tasks(self):
        """Deploy neutron_tun cluster using Fuel CLI

        Scenario:
            1. Create new environment
            2. Add 3 nodes with controller role
            3. Provision 3 controllers
            4. Start netconfig on second controller
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 50m
        """
        self.env.revert_snapshot("ready_with_3_slaves")
        node_ids = [self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)]

        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(1)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=vlan --json'.format(self.__class__.__name__,
                                              release_id))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            self.show_step(2)
            self.add_nodes_to_cluster(remote, cluster_id, node_ids[0:3],
                                      ['controller'])
            self.show_step(3)
            cmd = ('fuel node --node-id {0} --provision --env {1} --json'.
                   format(','.join(str(n) for n in node_ids), cluster_id))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=20 * 60)
            self.show_step(4)
            cmd = ('fuel node --node {0} --end netconfig --env {1} --json'.
                   format(node_ids[1], cluster_id))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=30 * 60)
            self.show_step(5)
            cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=130 * 60)
            self.show_step(6)
            self.fuel_web.verify_network(cluster_id)
            self.show_step(7)
            self.fuel_web.run_ostf(
                cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
                should_fail=1)
Example #19
def stop_monitor(remote):
    """Stops ceph-mon service depending on Linux distribution.

    :param remote: devops.helpers.helpers.SSHClient
    :return: None
    :raise: DistributionNotSupported
    """
    logger.debug("Stopping Ceph monitor on {0}".format(remote.host))
    check_distribution()
    if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
        run_on_remote(remote, 'stop ceph-mon-all')
    if OPENSTACK_RELEASE == OPENSTACK_RELEASE_CENTOS:
        run_on_remote(remote, '/etc/init.d/ceph stop')
Example #20
def stop_monitor(remote):
    """Stops ceph-mon service depending on Linux distribution.

    :param remote: devops.helpers.helpers.SSHClient
    :return: None
    :raise: DistributionNotSupported
    """
    logger.debug("Stopping Ceph monitor on {0}".format(remote.host))
    check_distribution()
    if OPENSTACK_RELEASE == OPENSTACK_RELEASE_UBUNTU:
        run_on_remote(remote, 'stop ceph-mon-all')
    if OPENSTACK_RELEASE == OPENSTACK_RELEASE_CENTOS:
        run_on_remote(remote, '/etc/init.d/ceph stop')
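Example #21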
    def do_restore(self,
                   backup_path, local_path,
                   repos_backup_path=None, repos_local_path=None):
        """ Wrapper for restore process of upgrading procedure"""
        # BOTH repos arguments should be passed at the same time
        # or BOTH should not be passed
        assert_equal(bool(repos_backup_path), bool(repos_local_path),
                     "Both repos arguments should be specified")
        self.install_octane()

        cmd = "mkdir -p {}".format(self.remote_dir_for_backups)
        run_on_remote(self.admin_remote, cmd)

        logger.info("Uploading {}".format(local_path))
        # pylint: disable=no-member
        self.admin_remote.upload(local_path, backup_path)
        # pylint: enable=no-member
        logger.info("Applying backup from {}".format(backup_path))
        self.octane_action("restore", backup_path)

        if repos_backup_path:
            logger.info("Uploading {}".format(repos_local_path))
            # pylint: disable=no-member
            self.admin_remote.upload(repos_local_path, repos_backup_path)
            # pylint: enable=no-member
            logger.info("Applying backup from {}".format(repos_backup_path))
            self.octane_action("repo-restore", repos_backup_path)

        logger.info(
            "Update existing CentOS bootstrap image using restored ssh keys")
        self.octane_action('update-bootstrap-centos')

        n_nodes = self.fuel_web.client.list_nodes()
        d_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_nodes)
        discover_n_nodes = [node for node in self.fuel_web.client.list_nodes()
                            if self.fuel_web.is_node_discovered(node)]

        if discover_n_nodes:
            logger.info("Rebooting bootstrapped nodes")
            discover_d_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
                discover_n_nodes)
            self.fuel_web.cold_restart_nodes(discover_d_nodes)

        # LP: 1561092 mcollective can get stuck after upgrade
        logger.info("Applying fix for LP:1561092")
        for node in d_nodes:
            with self.fuel_web.get_ssh_for_node(node_name=node.name) as remote:
                run_on_remote(remote, "service mcollective restart")
Example #22
    def cli_update_role(self):
        """Update controller role using Fuel CLI

        Scenario:
            1. Revert snapshot "ready_with_3_slaves"
            2. Download controller role yaml to master
            3. Remove section "conflicts" under "meta" section
            4. Upload changes using Fuel CLI
            5. Create new cluster
            6. Add new node to cluster with controller+compute

        Duration 20m
        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        node_ids = [self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)]
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        self.show_step(2)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel role --rel {} --role controller --file'
                ' /tmp/controller.yaml'.format(release_id))

        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="sed -i '/conflicts/,+1 d' /tmp/controller.yaml")

        self.show_step(4)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel role --rel {} --update --file'
                ' /tmp/controller.yaml'.format(release_id))

        with self.env.d_env.get_admin_remote() as remote:

            if NEUTRON_SEGMENT_TYPE:
                nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
            else:
                nst = ''
            self.show_step(5)
            cmd = ('fuel env create --name={0} --release={1} '
                   '{2} --json'.format(self.__class__.__name__,
                                       release_id, nst))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            self.show_step(6)
            cmd = ('fuel --env-id={0} node set --node {1} --role=controller,'
                   'compute'.format(cluster_id, node_ids[0]))
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         "Can't assign controller and compute node"
                         " to node id {}".format(node_ids[0]))

            self.env.make_snapshot("cli_update_role")
Example #23
def get_version(remote):
    """Returns Ceph version

    :param remote: devops.helpers.helpers.SSHClient
    :return: str
    """
    cmd = 'ceph --version'
    return run_on_remote(remote, cmd)[0].split(' ')[2]
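ceph --version prints a single line of the form 'ceph version 0.94.5 (<commit hash>)', so splitting on spaces and taking the third field yields the bare version string:

line = 'ceph version 0.94.5 (9764da52395923e0b32908d83a9f7304401fee43)'
version = line.split(' ')[2]
assert version == '0.94.5'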
Example #24
def get_version(remote):
    """Returns Ceph version

    :param remote: devops.helpers.helpers.SSHClient
    :return: str
    """
    cmd = 'ceph --version'
    return run_on_remote(remote, cmd)[0].split(' ')[2]
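Example #25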
    def upgrade_detach_plugin_restore(self):
        """Reinstall Fuel and restore data with cluster with detach-db plugin

        Scenario:
        1. Revert "upgrade_detach_plugin_backup" snapshot
        2. Reinstall Fuel master using iso given in ISO_PATH
        3. Install fuel-octane package
        4. Upload the backup back to the reinstalled Fuel master node
        5. Restore master node using 'octane fuel-restore'
        6. Ensure that the plugin was restored
        7. Verify networks for restored cluster
        8. Run OSTF for restored cluster

        Snapshot: upgrade_detach_plugin_restore
        Duration: TODO
        """
        assert_true(os.path.exists(self.repos_local_path))
        assert_true(os.path.exists(self.local_path))

        self.check_run(self.snapshot_name)
        self.show_step(1, initialize=True)
        assert_true(
            self.env.revert_snapshot(self.source_snapshot_name),
            "The test can not use given environment - snapshot "
            "{!r} does not exists".format(self.source_snapshot_name))

        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        self.env.reinstall_master_node()
        self.show_step(3)
        self.show_step(4)
        self.show_step(5)
        self.do_restore(self.backup_path, self.local_path,
                        self.repos_backup_path, self.repos_local_path)
        self.fuel_web.change_default_network_settings()
        self.env.sync_time()

        self.show_step(6)
        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        assert_true('detach-database' in attr['editable'],
                    "Can't find plugin data in cluster attributes!")
        stdout = run_on_remote(
            self.admin_remote,
            "find /var/www/nailgun/plugins/ -name detach-database*")
        assert_not_equal(len(stdout), 0, "Cannot find the plugin's directory")
        plugin_dir = stdout[0].strip()

        checkers.check_file_exists(self.admin_remote,
                                   os.path.join(plugin_dir, "metadata.yaml"))

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id)

        self.env.make_snapshot(self.snapshot_name, is_make=True)
        self.cleanup()
Example #26
def get_rbd_images_list(remote, pool):
    """Returns all OSD ids.

    :param remote: devops.helpers.helpers.SSHClient
    :param pool: string, can be: 'images', 'volumes', etc.
    :return: JSON-like object
    """
    cmd = 'rbd --pool {pool} --format json ls -l'.format(pool=pool)
    return run_on_remote(remote, cmd, jsonify=True)
Example #27
def get_rbd_images_list(remote, pool):
    """Returns all OSD ids.

    :param remote: devops.helpers.helpers.SSHClient
    :param pool: string, can be: 'images', 'volumes', etc.
    :return: JSON-like object
    """
    cmd = 'rbd --pool {pool} --format json ls -l'.format(pool=pool)
    return run_on_remote(remote, cmd, jsonify=True)
Example #28
def get_osd_tree(remote):
    """Returns OSDs according to their position in the CRUSH map.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    logger.debug("Fetching Ceph OSD tree")
    cmd = 'ceph osd tree -f json'
    return run_on_remote(remote, cmd, jsonify=True)
Example #29
def get_osd_tree(remote):
    """Returns OSDs according to their position in the CRUSH map.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    logger.debug("Fetching Ceph OSD tree")
    cmd = 'ceph osd tree -f json'
    return run_on_remote(remote, cmd, jsonify=True)
Example #30
def get_osd_ids(remote):
    """Returns all OSD ids.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    logger.debug("Fetching Ceph OSD ids")
    cmd = 'ceph osd ls -f json'
    return run_on_remote(remote, cmd, jsonify=True)
Example #31
def get_osd_ids(remote):
    """Returns all OSD ids.

    :param remote: devops.helpers.helpers.SSHClient
    :return: JSON-like object
    """
    logger.debug("Fetching Ceph OSD ids")
    cmd = 'ceph osd ls -f json'
    return run_on_remote(remote, cmd, jsonify=True)
Example #32
    def gate_patch_fuel_agent(self):
        """ Revert snapshot, update fuel-agent, bootstrap from review
        and provision one node

    Scenario:
        1. Revert snapshot "ready"
        2. Update fuel-agent on master node
        3. Update bootstrap
        4. Bootstrap 1 slave
        5. Create environment via FUEL CLI
        6. Assign controller role
        7. Provisioning node

        """
        if not settings.UPDATE_FUEL:
            raise Exception("UPDATE_FUEL variable is not set")
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready")

        self.show_step(2)
        replace_fuel_agent_rpm(self.env)

        self.show_step(3)
        patch_and_assemble_ubuntu_bootstrap(self.env)

        self.show_step(4)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[:1])

        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        self.show_step(5)
        with self.env.d_env.get_admin_remote() as remote:
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             release_id))
            env_result = run_on_remote(
                remote, cmd, jsonify=True, cli_command=True)
            cluster_id = env_result['id']

        self.show_step(6)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
            }
        )

        cluster_id = self.fuel_web.get_last_created_cluster()
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)

        self.env.make_snapshot("review_fuel_agent_one_node_provision")
Example #33
    def gate_fuel_web(self):
        """
    Scenario:
        1. Revert snapshot "empty"
        2. Apply changes into nailgun
        3. Get release id
        4. Update networks
        5. Bootstrap 3 nodes
        6. Create cluster
        7. Add 1 controller nodes
        8. Add 1 compute node
        9. Add 1 cinder node
        10. Deploy environment
        11. Run OSTF
        """
        if not UPDATE_FUEL:
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
        self.show_step(1)
        self.env.revert_snapshot("empty")
        self.show_step(2)
        replace_fuel_nailgun_rpm(self.env)
        self.show_step(3)
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]
        self.show_step(4)
        self.fuel_web.change_default_network_settings()
        self.show_step(5)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[:3])
        self.show_step(6)
        with self.env.d_env.get_admin_remote() as remote:
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             release_id))
            env_result = run_on_remote(
                remote, cmd, jsonify=True, cli_command=True)
            cluster_id = env_result['id']

        self.show_step(7)
        self.show_step(8)
        self.show_step(9)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder'],
            }
        )
        self.show_step(10)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(11)
        # run only 'smoke' since 'sanity' and 'ha' already ran in deploy_cluster_wait()
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke'])
Example #34
 def verify_bootstrap_on_slaves(self, slaves):
     logger.info("Verify bootstrap on slaves")
     for slave in slaves:
         with self.fuel_web.get_ssh_for_node(slave.name) as slave_remote:
             cmd = 'cat /etc/*release'
             output = run_on_remote(slave_remote, cmd)[0].lower()
             assert_true(
                 "ubuntu" in output,
                 "Slave {0} doesn't use Ubuntu image for "
                 "bootstrap after Ubuntu images "
                 "were enabled".format(slave.name))
Example #35
    def gate_patch_fuel_agent(self):
        """ Revert snapshot, update fuel-agent, bootstrap from review
        and provision one node

    Scenario:
        1. Revert snapshot "ready"
        2. Update fuel-agent in MCollective
        3. Update bootstrap
        4. Bootstrap 1 slave
        5. Create environment via FUEL CLI
        6. Assign controller role
        7. Provisioning node

        """
        if not settings.UPDATE_FUEL:
            raise Exception("UPDATE_FUEL variable is not set")
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready")

        self.show_step(2)
        replace_fuel_agent_rpm(self.env)

        self.show_step(3)
        patch_and_assemble_ubuntu_bootstrap(self.env)

        self.show_step(4)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[:1])

        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]

        self.show_step(5)
        with self.env.d_env.get_admin_remote() as remote:
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             release_id))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']

        self.show_step(6)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
            }
        )

        cluster_id = self.fuel_web.get_last_created_cluster()
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)

        self.env.make_snapshot("review_fuel_agent_one_node_provision")
Example #36
    def cli_create_role_with_has_primary(self):
        """Create new role using Fuel CLI

        Scenario:
            1. Revert snapshot "ready_with_3_slaves"
            2. Upload new role yaml to master
            3. Upload yaml to nailgun using Fuel CLI
            4. Create new cluster
            5. Try to create node with new role

        Duration 20m
        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        node_ids = [
            self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)
        ]
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]
        templates_path = os.path.join(
            '{0}/fuelweb_test/config_templates/'.format(
                os.environ.get("WORKSPACE", "./")), 'create_primary_role.yaml')
        self.show_step(2)
        if os.path.exists(templates_path):
            self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip,
                                              templates_path, '/tmp')
        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel role --rel {} --create --file'
            ' /tmp/create_primary_role.yaml'.format(release_id))

        with self.env.d_env.get_admin_remote() as remote:

            if NEUTRON_SEGMENT_TYPE:
                nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
            else:
                nst = ''
            self.show_step(4)
            cmd = ('fuel env create --name={0} --release={1} '
                   '{2} --json'.format(self.__class__.__name__, release_id,
                                       nst))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            self.show_step(5)
            cmd = ('fuel --env-id={0} node set --node {1}'
                   ' --role=test-primary-role'.format(cluster_id, node_ids[0]))
            result = remote.execute(cmd)
            assert_equal(
                result['exit_code'], 0, "Can't assign new role"
                " to node id {}".format(node_ids[0]))
            self.env.make_snapshot("cli_create_role_with_has_primary")
    def octane_action(self, action, path=None):
        assert_true(action in self.OCTANE_COMMANDS.keys(),
                    "Unknown octane action '{}', aborting".format(action))
        octane_cli_args = {
            'path': path,
            'pwd': settings.KEYSTONE_CREDS['password']
        }
        if 'backup' in action:
            # pylint: disable=no-member
            assert_false(self.admin_remote.exists(path),
                         'File already exists, not able to reuse')
            # pylint: enable=no-member
        elif 'restore' in action:
            checkers.check_file_exists(self.admin_remote, path)

        run_on_remote(self.admin_remote,
                      self.OCTANE_COMMANDS[action].format(**octane_cli_args))

        if 'backup' in action:
            checkers.check_file_exists(self.admin_remote, path)
Example #38
def get_monitor_node_fqdns(remote):
    """Returns node FQDNs with Ceph monitor service is running.

    :param remote: devops.helpers.helpers.SSHClient
    :return: list of FQDNs
    """
    cmd = 'ceph mon_status -f json'
    result = run_on_remote(remote, cmd, jsonify=True)
    fqdns = [i['name'] + DNS_SUFFIX for i in result['monmap']['mons']]
    msg = "Ceph monitor service is running on {0}".format(', '.join(fqdns))
    logger.debug(msg)
    return fqdns
Example #39
    def cli_create_role_with_has_primary(self):
        """Create new role using Fuel CLI

        Scenario:
            1. Revert snapshot "ready_with_3_slaves"
            2. Upload new role yaml to master
            3. Upload yaml to nailgun using Fuel CLI
            4. Create new cluster
            5. Try to create node with new role

        Duration 20m
        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        node_ids = [self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)]
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]
        templates_path = os.path.join(
            '{0}/fuelweb_test/config_templates/'.format(os.environ.get(
                "WORKSPACE", "./")), 'create_primary_role.yaml')
        self.show_step(2)
        if os.path.exists(templates_path):
            self.ssh_manager.upload_to_remote(self.ssh_manager.admin_ip,
                                              templates_path, '/tmp')
        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel role --rel {} --create --file'
                ' /tmp/create_primary_role.yaml'.format(release_id))

        with self.env.d_env.get_admin_remote() as remote:

            if NEUTRON_SEGMENT_TYPE:
                nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
            else:
                nst = ''
            self.show_step(4)
            cmd = ('fuel env create --name={0} --release={1} '
                   '{2} --json'.format(self.__class__.__name__,
                                       release_id, nst))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            self.show_step(5)
            cmd = ('fuel --env-id={0} node set --node {1}'
                   ' --role=test-primary-role'.format(cluster_id,
                                                      node_ids[0]))
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         "Can't assign new role"
                         " to node id {}".format(node_ids[0]))
            self.env.make_snapshot("cli_create_role_with_has_primary")
Example #40
def get_monitor_node_fqdns(remote):
    """Returns node FQDNs with Ceph monitor service is running.

    :param remote: devops.helpers.helpers.SSHClient
    :return: list of FQDNs
    """
    cmd = 'ceph mon_status -f json'
    result = run_on_remote(remote, cmd, jsonify=True)
    fqdns = [i['name'] + DNS_SUFFIX for i in result['monmap']['mons']]
    msg = "Ceph monitor service is running on {0}".format(', '.join(fqdns))
    logger.debug(msg)
    return fqdns
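Example #41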
    def prepare_upgrade_detach_plugin(self):
        self.backup_name = "backup_detach_plugin.tar.gz"
        self.repos_backup_name = "repos_backup_detach_plugin.tar.gz"

        self.check_run("upgrade_detach_plugin_backup")
        self.env.revert_snapshot("ready", skip_timesync=True)

        run_on_remote(
            self.admin_remote,
            "yum -y install git python-pip createrepo dpkg-devel dpkg-dev rpm "
            "rpm-build && pip install fuel-plugin-builder")

        run_on_remote(
            self.admin_remote,
            "git clone https://github.com/"
            "openstack/fuel-plugin-detach-database")

        cmds = [
            "cd fuel-plugin-detach-database", "git checkout stable/{}".format(
                settings.UPGRADE_FUEL_FROM),
            "fpb --build . ",
            "fuel plugins --install *.rpm "
            "--user {user} --password {pwd}".format(
                user=settings.KEYSTONE_CREDS['username'],
                pwd=settings.KEYSTONE_CREDS['password'])
        ]

        run_on_remote(self.admin_remote, " && ".join(cmds))

        cluster_settings = {
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'ephemeral_ceph': True,
        }
        cluster_settings.update(self.cluster_creds)

        self.deploy_cluster({
            'name': self.prepare_upgrade_detach_plugin.__name__,
            'settings': cluster_settings,
            'plugin':
                {'name': 'detach-database',
                 'data': {'metadata/enabled': True}},
            'nodes':
                {'slave-01': ['controller'],
                 'slave-02': ['controller'],
                 'slave-03': ['controller'],
                 'slave-04': ['standalone-database'],
                 'slave-05': ['standalone-database'],
                 'slave-06': ['standalone-database'],
                 'slave-07': ['compute', 'ceph-osd'],
                 'slave-08': ['compute', 'ceph-osd']}
        })

        self.do_backup(self.backup_path, self.local_path,
                       self.repos_backup_path, self.repos_local_path)
        self.env.make_snapshot("upgrade_detach_plugin_backup", is_make=True)
Example #42
 def verify_bootstrap_on_slaves(self, slaves):
     logger.info("Verify bootstrap on slaves")
     for slave in slaves:
         with self.fuel_web.get_ssh_for_node(
                 slave.name) as slave_remote:
             cmd = 'cat /etc/*release'
             output = run_on_remote(
                 slave_remote, cmd)[0].lower()
              assert_true(
                  "ubuntu" in output,
                  "Slave {0} doesn't use Ubuntu image for "
                  "bootstrap after Ubuntu images "
                  "were enabled".format(slave.name))
Example #43
def check_neutron_dhcp_lease(remote, instance_ip, instance_mac, dhcp_server_ip,
                             dhcp_port_tag):
    """Check if the DHCP server offers a lease for a client with the specified
       MAC address
       :param SSHClient remote: fuel-devops.helpers.helpers object
       :param str instance_ip: IP address of instance
       :param str instance_mac: MAC address that will be checked
       :param str dhcp_server_ip: IP address of the DHCP server to request a lease from
       :param str dhcp_port_tag: OVS port tag used to access the DHCP server
       :return bool: True if DHCP lease for the 'instance_mac' was obtained
    """
    logger.debug(
        "Checking DHCP server {0} for lease {1} with MAC address {2}".format(
            dhcp_server_ip, instance_ip, instance_mac))
    ovs_port_name = 'tapdhcptest1'
    ovs_cmd = '/usr/bin/ovs-vsctl --timeout=10 --oneline --format=json -- '
    ovs_add_port_cmd = ("--if-exists del-port {0} -- "
                        "add-port br-int {0} -- "
                        "set Interface {0} type=internal -- "
                        "set Port {0} tag={1}".format(ovs_port_name,
                                                      dhcp_port_tag))
    ovs_del_port_cmd = ("--if-exists del-port {0}".format(ovs_port_name))

    # Add an OVS interface with a tag for accessing the DHCP server
    run_on_remote(remote, ovs_cmd + ovs_add_port_cmd)

    # Assign to the created interface the same MAC address
    # that was used for the instance.
    run_on_remote(
        remote, "ifconfig {0} hw ether {1}".format(ovs_port_name,
                                                   instance_mac))
    run_on_remote(remote, "ifconfig {0} up".format(ovs_port_name))

    # Perform a 'dhcpcheck' request to check if the lease can be obtained
    lease = run_on_remote(
        remote, "dhcpcheck request {0} {1} --range_start {2} "
        "--range_end 255.255.255.255 | fgrep \" {1} \"".format(
            ovs_port_name, dhcp_server_ip, instance_ip))

    # Remove the OVS interface
    run_on_remote(remote, ovs_cmd + ovs_del_port_cmd)

    logger.debug("DHCP server answer: {}".format(lease))
    return ' ack ' in lease
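The return value hinges on dhcpcheck printing a line containing ' ack ' when the server grants the lease. With a hypothetical output line (the exact dhcpcheck output format is not shown on this page):

# Hypothetical dhcpcheck output for a granted lease:
lease = 'recv from 10.20.0.2: ack 10.20.0.131 for fa:16:3e:aa:bb:cc'
assert (' ack ' in lease) is True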
Example #44
    def gate_fuel_web(self):
        """
    Scenario:
        1. Revert snapshot "empty"
        2. Apply changes into nailgun
        3. Get release id
        4. Update networks
        5. Bootstrap 3 nodes
        6. Create cluster
        7. Add 1 controller nodes
        8. Add 1 compute node
        9. Add 1 cinder node
        10. Deploy environment
        11. Run OSTF
        """
        if not UPDATE_FUEL:
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
        self.show_step(1)
        self.env.revert_snapshot("empty")
        self.show_step(2)
        replace_fuel_nailgun_rpm(self.env)
        self.show_step(3)
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]
        self.show_step(4)
        self.fuel_web.change_default_network_settings()
        self.show_step(5)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:3])
        self.show_step(6)
        with self.env.d_env.get_admin_remote() as remote:
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             release_id))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']

        self.show_step(7)
        self.show_step(8)
        self.show_step(9)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder'],
            })
        self.show_step(10)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(11)
        # run only 'smoke' since 'sanity' and 'ha' already ran in deploy_cluster_wait()
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])
Example #45
def check_hiera_hosts(self, nodes, cmd):
    hiera_hosts = []
    for node in nodes:
        with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
            hosts = ''.join(run_on_remote(remote, cmd)).strip().split(',')
            logger.debug("hosts on {0} are {1}".format(node['hostname'],
                                                       hosts))
            if not hiera_hosts:
                hiera_hosts = hosts
                continue
            else:
                assert_true(set(hosts) == set(hiera_hosts),
                            'Hosts on node {0} differ from'
                            ' others'.format(node['hostname']))
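check_hiera_hosts takes the first node's comma-separated host list as the reference and asserts that every other node reports the same set, ignoring order. The core comparison in isolation:

reference = 'node-1,node-2,node-3'.strip().split(',')
other = 'node-3,node-1,node-2'.strip().split(',')
# Order differs between nodes, but only set equality matters here.
assert set(other) == set(reference)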
Example #46
def check_neutron_dhcp_lease(remote, instance_ip, instance_mac,
                             dhcp_server_ip, dhcp_port_tag):
    """Check if the DHCP server offers a lease for a client with the specified
       MAC address
       :param SSHClient remote: fuel-devops.helpers.helpers object
       :param str instance_ip: IP address of instance
       :param str instance_mac: MAC address that will be checked
       :param str dhcp_server_ip: IP address of the DHCP server to request a lease from
       :param str dhcp_port_tag: OVS port tag used to access the DHCP server
       :return bool: True if DHCP lease for the 'instance_mac' was obtained
    """
    logger.debug("Checking DHCP server {0} for lease {1} with MAC address {2}"
                 .format(dhcp_server_ip, instance_ip, instance_mac))
    ovs_port_name = 'tapdhcptest1'
    ovs_cmd = '/usr/bin/ovs-vsctl --timeout=10 --oneline --format=json -- '
    ovs_add_port_cmd = ("--if-exists del-port {0} -- "
                        "add-port br-int {0} -- "
                        "set Interface {0} type=internal -- "
                        "set Port {0} tag={1}"
                        .format(ovs_port_name, dhcp_port_tag))
    ovs_del_port_cmd = ("--if-exists del-port {0}".format(ovs_port_name))

    # Add an OVS interface with a tag for accessing the DHCP server
    run_on_remote(remote, ovs_cmd + ovs_add_port_cmd)

    # Assign to the created interface the same MAC address
    # that was used for the instance.
    run_on_remote(remote, "ifconfig {0} hw ether {1}".format(ovs_port_name,
                                                             instance_mac))
    run_on_remote(remote, "ifconfig {0} up".format(ovs_port_name))

    # Perform a 'dhcpcheck' request to check if the lease can be obtained
    lease = run_on_remote(remote,
                          "dhcpcheck request {0} {1} --range_start {2} "
                          "--range_end 255.255.255.255 | fgrep \" {1} \""
                          .format(ovs_port_name, dhcp_server_ip, instance_ip))

    # Remove the OVS interface
    run_on_remote(remote, ovs_cmd + ovs_del_port_cmd)

    logger.debug("DHCP server answer: {}".format(lease))
    return ' ack ' in lease
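Example #47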
    def install_octane(self):
        """ Install fuel-octane package to master node"""
        del self.admin_remote
        conf_file = None
        if settings.FUEL_PROPOSED_REPO_URL:
            conf_file = '/etc/yum.repos.d/fuel-proposed.repo'
            settings.FUEL_PROPOSED_REPO_URL = os.environ.get(
                'FUEL_PROPOSED_REPO_URL')
            cmd = ("echo -e "
                   "'[fuel-proposed]\n"
                   "name=fuel-proposed\n"
                   "baseurl={}/\n"
                   "gpgcheck=0\n"
                   "priority=1' > {}").format(
                       settings.FUEL_PROPOSED_REPO_URL,
                       conf_file)

            run_on_remote(self.admin_remote, cmd)

        run_on_remote(self.admin_remote, "yum install -y fuel-octane")

        if settings.FUEL_PROPOSED_REPO_URL:
            # pylint: disable=no-member
            self.admin_remote.rm_rf(conf_file)
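Example #48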
    def gate_patch_fuel_agent(self):
        """ Revert snapshot, update fuel-agent, bootstrap from review
        and provision one node

    Scenario:
        1. Revert snapshot "ready"
        2. Update fuel-agent in MCollective
        3. Update bootstrap
        4. Bootstrap 1 slave
        5. Create environment via FUEL CLI
        6. Assign controller role
        7. Provisioning node

        """
        if not settings.UPDATE_FUEL:
            raise Exception("UPDATE_FUEL variable is not set")
        self.show_step(1)
        self.env.revert_snapshot("ready")

        self.show_step(2)
        replace_fuel_agent_rpm(self.env)

        self.show_step(3)
        replace_bootstrap(self.env)

        self.show_step(4)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:1])

        release_id = self.fuel_web.get_releases_list_for_os(release_name=OPENSTACK_RELEASE)[0]

        self.show_step(5)
        with self.env.d_env.get_admin_remote() as remote:
            cmd = "fuel env create --name={0} --release={1} " "--nst={2} --json".format(
                self.__class__.__name__, release_id, NEUTRON_SEGMENT_TYPE
            )
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result["id"]

        self.show_step(6)
        self.fuel_web.update_nodes(cluster_id, {"slave-01": ["controller"]})

        cluster_id = self.fuel_web.get_last_created_cluster()
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)

        self.env.make_snapshot("review_fuel_agent_one_node_provision")
Example #49
def run_upgrade_script(node_ssh,
                       script_path,
                       script_name,
                       password='******',
                       rollback=False,
                       exit_code=0):
    path = os.path.join(script_path, script_name)
    check_file_exists(node_ssh, path)
    c_res = node_ssh.execute('chmod 755 {0}'.format(path))
    logger.debug("Result of chmod is {0}".format(c_res))
    if rollback:
        path = "UPGRADERS='host-system docker openstack" \
               " raise-error' {0}/{1}" \
               " --password {2}".format(script_path, script_name, password)
    else:
        path = "{0}/{1} --no-rollback --password {2}".format(
            script_path, script_name, password)

    result = run_on_remote_get_results(node_ssh,
                                       path,
                                       assert_ec_equal=[exit_code],
                                       raise_on_assert=False)

    # TODO: check that we really need this log from fuel_upgrade.log
    if result['exit_code'] != exit_code:
        log = "".join(
            run_on_remote(node_ssh,
                          "awk -v p=\"UPGRADE FAILED\" 'BEGIN{m=\"\"}"
                          " {if ($0 ~ p) {m=$0} else m=m\"\\n\"$0}"
                          " END{if (m ~ p) print m}'"
                          " /var/log/fuel_upgrade.log",
                          raise_on_assert=False))

        logger.error("Message from /var/log/fuel_upgrade.log:\n"
                     "{log}".format(log=log))

    assert_equal(
        result['exit_code'], exit_code,
        "Upgrade script failed with exit code {exit_code}, "
        "please inspect logs for details.\n"
        "last output: \"{output}\""
        "".format(exit_code=result['exit_code'],
                  output=''.join(result['stdout'][-5:]) +
                  result['stderr_str']))
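
A minimal usage sketch for run_upgrade_script (not from the original source:
the '/var' path, the 'upgrade.sh' name and the 255 rollback exit code are
assumptions for illustration):

def upgrade_master_node(env):
    # Plain run: the script must finish with exit code 0 (the default).
    with env.d_env.get_admin_remote() as admin_remote:
        run_upgrade_script(admin_remote, '/var', 'upgrade.sh')

def rollback_master_node(env):
    # Rollback run: a failing upgrader is injected and the script is
    # expected to exit with the assumed non-zero code.
    with env.d_env.get_admin_remote() as admin_remote:
        run_upgrade_script(admin_remote, '/var', 'upgrade.sh',
                           rollback=True, exit_code=255)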
Example #50
0
File: ovs.py Project: SergK/fuel-qa
def ovs_get_data(remote, table, columns=None):
    """Get data from a specified OpenVSwitch table

       :param SSHClient remote: fuel-devops.helpers.helpers object
       :param str table: ovs table name (see `ovsdb-client list-tables`)
       :param list columns:
           list of strings to get specified columns. if None - all columns
           will be requested.
       :return dict: data from JSON object
    """
    if columns:
        col = '--columns=' + ','.join(columns)
    else:
        col = ''
    cmd = ('ovs-vsctl --oneline --format=json {columns} list {table}'
           .format(columns=col, table=table))
    res = run_on_remote(remote, cmd, jsonify=True)
    logger.debug("OVS output of the command '{0}': {1}".format(cmd, res))
    return res
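
A minimal usage sketch for ovs_get_data (not from the original source; it
assumes `remote` is an open SSHClient to a node running Open vSwitch and uses
the 'name' and 'fail_mode' columns of the standard OVS 'Bridge' table):

bridges = ovs_get_data(remote, table='Bridge',
                       columns=['name', 'fail_mode'])
logger.debug("OVS bridges on the node: {0}".format(bridges))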
Example #52
0
    def create_backup_reset_restore_and_deploy_via_cli(self):
        """Backup/restore master node with cluster in ha mode

        Scenario:
            1. Create env with 1 Controller, 1 Compute, 1 Ceph
            2. Start provisioning and wait for it is finished
            3. Backup master
            4. Reset env
            5. Restore master
            6. Delete env
            7. Create new env via CLI with the same staff
            8. Start provisioning via CLI

        Duration 75m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': NEUTRON_SEGMENT_TYPE
            })

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['ceph-osd']
            })
        self.fuel_web.provisioning_cluster_wait(cluster_id)

        with self.env.d_env.get_admin_remote() as remote:
            self.fuel_web.backup_master(remote)
            checkers.backup_check(remote)

        self.fuel_web.stop_reset_env_wait(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3], timeout=10 * 60)

        with self.env.d_env.get_admin_remote() as remote:
            with RunLimit(seconds=60 * 10,
                          error_message="'dockerctl restore' "
                          "ran longer than 600 sec"):
                self.fuel_web.restore_master(remote)
            checkers.restore_check_sum(remote)

        number_of_nodes = len(
            self.fuel_web.client.list_cluster_nodes(cluster_id))

        self.fuel_web.client.delete_cluster(cluster_id)

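        # Deleting the cluster sends its nodes back to bootstrap; wait until
        # Nailgun rediscovers all of them before reusing them via the CLI.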
        try:
            wait((lambda: len(self.fuel_web.client.list_nodes()) ==
                  number_of_nodes),
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(
                len(self.fuel_web.client.list_nodes()) == number_of_nodes,
                'Nodes were not discovered within the 5 * 60 sec timeout')

        cl = CommandLine()
        release_id = self.fuel_web.get_releases_list_for_os(
            release_name=OPENSTACK_RELEASE)[0]
        node_ids = [
            self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[slave_id])['id']
            for slave_id in range(3)
        ]

        with self.env.d_env.get_admin_remote() as remote:
            # Create an environment
            if NEUTRON_SEGMENT_TYPE:
                nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
            else:
                nst = ''
            cmd = ('fuel env create --name={0} --release={1} '
                   '{2} --json'.format(self.__class__.__name__, release_id,
                                       nst))
            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']

            # Update network parameters
            cl.update_cli_network_configuration(cluster_id, remote)

            # Update SSL configuration
            cl.update_ssl_configuration(cluster_id, remote)

            roles = {
                'controller': node_ids[0],
                'compute': node_ids[1],
                'ceph-osd': node_ids[2]
            }

            for role in roles:
                cmd = (
                    'fuel --env-id={0} node set --node {1} --role={2}'.format(
                        cluster_id, roles[role], role))
                remote.execute(cmd)
            cmd = (
                'fuel --env-id={0} node --provision --node={1} --json'.format(
                    cluster_id, ','.join(str(l) for l in node_ids)))
            logger.info("Started provisioning via CLI")
            task = run_on_remote(remote, cmd, jsonify=True)
            cl.assert_cli_task_success(task, remote, timeout=30 * 60)
            logger.info("Finished provisioning via CLI")
Example #53
0
    def review_fuel_cli_one_node_deploy(self):
        """ Revert snapshot, apply changes from review and deploy
        cluster with controller node only over cli.

        Scenario:
            1. Revert snapshot 'ready_with_1_slaves'
            2. Apply changes from review
            3. Bootstrap 1 node
            4. Show releases list
            5. Create cluster over cli
            6. Update networks
            7. Update SSL settings
            8. List environments
            9. Add and provision 1 node with controller role
            10. Deploy node
            11. Delete cluster

        Duration 20m
        """
        if not UPDATE_FUEL:
            raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'true')
        self.show_step(1, initialize=True)
        self.env.revert_snapshot('ready_with_1_slaves')
        target_path = '/var/www/nailgun/python-fuelclient/'
        package_name = 'python-fuelclient'
        with self.env.d_env.get_admin_remote() as remote:
            self.show_step(2)
            self.upload_package(remote, target_path, package_name)
            self.replace_package(remote,
                                 package_name=package_name,
                                 package_path=target_path)

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:1])

        node_id = [
            self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[0])['id']
        ]

        with self.env.d_env.get_admin_remote() as remote:
            # get releases list
            self.show_step(4)
            list_release_cmd = 'fuel release --json'
            list_release_res = run_on_remote(remote,
                                             list_release_cmd,
                                             jsonify=True)
            active_release_id = [
                release['id'] for release in list_release_res
                if release['is_deployable']
            ]
            asserts.assert_true(
                active_release_id,
                'Cannot find a deployable release. '
                'Current release data: {0}'.format(list_release_res))

            # Create an environment
            self.show_step(5)
            cmd = ('fuel env create --name={0} --release={1} '
                   '--nst=tun --json'.format(self.__class__.__name__,
                                             active_release_id[0]))

            env_result = run_on_remote(remote, cmd, jsonify=True)
            cluster_id = env_result['id']
            cluster_name = env_result['name']

            # Update network parameters
            self.show_step(6)
            self.update_cli_network_configuration(cluster_id, remote)

            # Update SSL configuration
            self.show_step(7)
            self.update_ssl_configuration(cluster_id, remote)

            self.show_step(8)
            cmd = 'fuel env --json'
            env_list_res = run_on_remote(remote, cmd, jsonify=True)
            asserts.assert_true(
                cluster_id in [cluster['id'] for cluster in env_list_res],
                'Cannot find the id of the previously created environment '
                'in the fuel environment list.')
            asserts.assert_true(
                cluster_name in [cluster['name'] for cluster in env_list_res],
                'Cannot find the cluster name in the fuel env command output')

            # Add and provision a controller node
            self.show_step(9)
            logger.info("Add to the cluster and start provisioning "
                        "a controller node [{0}]".format(node_id[0]))
            cmd = ('fuel --env-id={0} node set --node {1} --role=controller'.
                   format(cluster_id, node_id[0]))
            remote.execute(cmd)
            cmd = (
                'fuel --env-id={0} node --provision --node={1} --json'.format(
                    cluster_id, node_id[0]))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=30 * 60)

            # Deploy the controller node
            self.show_step(10)
            cmd = ('fuel --env-id={0} node --deploy --node {1} --json'.format(
                cluster_id, node_id[0]))
            task = run_on_remote(remote, cmd, jsonify=True)
            self.assert_cli_task_success(task, remote, timeout=60 * 60)

        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity'])
        self.show_step(11)
        with self.env.d_env.get_admin_remote() as remote:
            res = remote.execute(
                'fuel --env {0} env delete'.format(cluster_id))
        asserts.assert_true(res['exit_code'] == 0,
                            'Environment deletion command failed')

        with self.env.d_env.get_admin_remote() as remote:
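            # `fuel env | awk '{print $1}' | tail -n 1 | grep '^.$'` checks
            # the first column of the last output line; once no single-char
            # cluster id remains, grep exits non-zero and the environment is
            # considered deleted.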
            try:
                wait(lambda: remote.execute("fuel env |  awk '{print $1}'"
                                            " |  tail -n 1 | grep '^.$'")[
                                                'exit_code'] == 1,
                     timeout=60 * 10)
            except TimeoutError:
                raise TimeoutError(
                    "cluster {0} was not deleted".format(cluster_id))

        self.env.make_snapshot("review_fuel_cli_one_node_deploy")