Example #1
def replace_centos_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with re-builded with review code
    environment - Environment Model object - self.env
    """
    logger.info("Updating bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist"
                        .format(settings.UPDATE_FUEL))
    try:
        rebuilded_bootstrap = '/var/initramfs.img.updated'
        with environment.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(remote, rebuilded_bootstrap)
            logger.info("Assigning new bootstrap from {}"
                        .format(rebuilded_bootstrap))
            bootstrap = "/var/www/nailgun/bootstrap"
            cmd = ("mv {0}/initramfs.img /var/initramfs.img;"
                   "cp /var/initramfs.img.updated {0}/initramfs.img;"
                   "chmod +r {0}/initramfs.img;"
                   ).format(bootstrap)
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         ('Failed to assign bootstrap {}'
                          ).format(result))
        cmd = "cobbler sync"
        environment.base_actions.execute(cmd, exit_code=0)
    except Exception as e:
        logger.error("Could not update bootstrap {e}".format(e=e))
        raise
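Every example in this listing leans on checkers.check_file_exists from fuel-qa's helpers. As a rough mental model only, a stand-in that honors the remote.execute() contract visible above (a dict with an 'exit_code' key) might look like this; the real helper's signature and behavior may differ:

def check_file_exists(remote, path):
    # Sketch only: assumes remote.execute() returns {'exit_code': ...}
    # exactly like the calls in the example above.
    result = remote.execute('test -f {0}'.format(path))
    assert result['exit_code'] == 0, (
        'File {0} does not exist on the remote host'.format(path))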
Example #2
    def upgrade_master_node(self, rollback=False, file_upload=True):
        """This method upgrades master node with current state."""
        # TODO: It will be remooved or changed

        master = self.admin_remote
        if file_upload:
            checkers.upload_tarball(master, hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(
                master,
                os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
            self.untar(master, os.path.basename(hlp_data.TARBALL_PATH), '/var')

        keystone_pass = hlp_data.KEYSTONE_CREDS['password']
        checkers.run_upgrade_script(master,
                                    '/var',
                                    'upgrade.sh',
                                    password=keystone_pass,
                                    rollback=rollback,
                                    exit_code=255 if rollback else 0)
        if not rollback:
            checkers.wait_upgrade_is_done(master,
                                          3000,
                                          phrase='***UPGRADING MASTER NODE'
                                          ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(master,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        elif rollback:
            checkers.wait_rollback_is_done(master, 3000)
            checkers.check_upgraded_containers(master,
                                               hlp_data.UPGRADE_FUEL_TO,
                                               hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
Example #3
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[self.primary_controller_fqdn][
                'root_free']

            logger.info(
                "Free space in root on primary controller - {}".format(
                    controller_space_on_root
                ))

            controller_space_to_filled = str(
                int(
                    controller_space_on_root
                ) - self.rabbit_disk_free_limit - 1)

            logger.info(
                "Need to fill space on root - {}".format(
                    controller_space_to_filled
                ))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
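get_pacemaker_nodes_attributes() converts the CIB status XML from `cibadmin --query --scope status` into a {fqdn: {attribute: value}} mapping, which is how 'root_free' is looked up above. A minimal sketch of that conversion, assuming the standard CIB layout (node_state elements carrying nvpair entries) rather than the exact fuel-qa implementation:

import xml.etree.ElementTree as ET

def get_pacemaker_nodes_attributes(cib_status_xml):
    # Sketch: walk <node_state uname=...> elements and collect their
    # <nvpair name=... value=...> entries per node.
    attrs = {}
    for node_state in ET.fromstring(cib_status_xml).iter('node_state'):
        node_attrs = {}
        for nvpair in node_state.iter('nvpair'):
            node_attrs[nvpair.get('name')] = nvpair.get('value')
        attrs[node_state.get('uname')] = node_attrs
    return attrs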
Example #4
def replace_centos_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with re-builded with review code
    environment - Environment Model object - self.env
    """
    logger.info("Updating bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist".format(settings.UPDATE_FUEL))
    try:
        rebuilded_bootstrap = '/var/initramfs.img.updated'
        with environment.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(remote, rebuilded_bootstrap)
            logger.info(
                "Assigning new bootstrap from {}".format(rebuilded_bootstrap))
            bootstrap = "/var/www/nailgun/bootstrap"
            cmd = ("mv {0}/initramfs.img /var/initramfs.img;"
                   "cp /var/initramfs.img.updated {0}/initramfs.img;"
                   "chmod +r {0}/initramfs.img;").format(bootstrap)
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         ('Failed to assign bootstrap {}').format(result))
        cmd = "cobbler sync"
        environment.base_actions.execute(cmd, exit_code=0)
    except Exception as e:
        logger.error("Could not update bootstrap {e}".format(e=e))
        raise
Example #5
    def upgrade_master_node(self, rollback=False, file_upload=True):
        """This method upgrades master node with current state."""
        # TODO: It will be remooved or changed

        master = self.admin_remote
        if file_upload:
            checkers.upload_tarball(master, hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(
                master,
                os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
            self.untar(master, os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')

        keystone_pass = hlp_data.KEYSTONE_CREDS['password']
        checkers.run_upgrade_script(master, '/var', 'upgrade.sh',
                                    password=keystone_pass,
                                    rollback=rollback,
                                    exit_code=255 if rollback else 0)
        if not rollback:
            checkers.wait_upgrade_is_done(master, 3000,
                                          phrase='***UPGRADING MASTER NODE'
                                                 ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(master,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        elif rollback:
            checkers.wait_rollback_is_done(master, 3000)
            checkers.check_upgraded_containers(master,
                                               hlp_data.UPGRADE_FUEL_TO,
                                               hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
Example #6
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[
                    self.primary_controller_fqdn]['root_free']

            logger.info("Free space in root on primary controller - {}".format(
                controller_space_on_root))

            controller_space_to_filled = str(
                int(controller_space_on_root) - self.rabbit_disk_free_limit -
                1)

            logger.info("Need to fill space on root - {}".format(
                controller_space_to_filled))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
Example #7
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        pacemaker_attributes = self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='cibadmin --query --scope status'
        )['stdout_str']
        controller_space_on_root = get_pacemaker_nodes_attributes(
            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']

        logger.info("Free space in root on primary controller - {}".format(
                    controller_space_on_root))

        controller_space_to_filled = str(
            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1
        )

        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))

        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile2'.format(
                controller_space_to_filled)
        )
        check_file_exists(node['ip'], '/root/bigfile2')
Example #8
    def upgrade_detach_plugin_restore(self):
        """Reinstall Fuel and restore data with cluster with detach-db plugin

        Scenario:
        1. Revert "upgrade_detach_plugin_backup" snapshot
        2. Reinstall Fuel master using iso given in ISO_PATH
        3. Install fuel-octane package
        4. Upload the backup back to the reinstalled Fuel master node
        5. Restore master node using 'octane fuel-restore'
        6. Ensure that the plugin was restored
        7. Verify networks for restored cluster
        8. Run OSTF for restored cluster

        Snapshot: upgrade_detach_plugin_restore
        Duration: TODO
        """
        assert_true(os.path.exists(self.repos_local_path))
        assert_true(os.path.exists(self.local_path))

        self.check_run(self.snapshot_name)
        self.show_step(1, initialize=True)
        assert_true(
            self.env.revert_snapshot(self.source_snapshot_name),
            "The test can not use given environment - snapshot "
            "{!r} does not exists".format(self.source_snapshot_name))

        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        self.env.reinstall_master_node()
        self.show_step(3)
        self.show_step(4)
        self.show_step(5)
        self.do_restore(self.backup_path, self.local_path,
                        self.repos_backup_path, self.repos_local_path)
        self.fuel_web.change_default_network_settings()
        self.env.sync_time()

        self.show_step(6)
        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        assert_true('detach-database' in attr['editable'],
                    "Can't find plugin data in cluster attributes!")
        stdout = run_on_remote(
            self.admin_remote,
            "find /var/www/nailgun/plugins/ -name detach-database*")
        assert_not_equal(len(stdout), 0, "Cannot find the plugin's directory")
        plugin_dir = stdout[0].strip()

        checkers.check_file_exists(self.admin_remote,
                                   os.path.join(plugin_dir, "metadata.yaml"))

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id)

        self.env.make_snapshot(self.snapshot_name, is_make=True)
        self.cleanup()
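run_on_remote(), used above to locate the plugin directory, is another thin fuel-qa convenience wrapper. The call sites only rely on it returning stdout lines and failing on a non-zero exit code; a sketch under that assumption (not the original implementation):

def run_on_remote(remote, cmd):
    # Sketch: execute and fail loudly, returning stdout as a list of lines.
    result = remote.execute(cmd)
    assert result['exit_code'] == 0, (
        'Command {0!r} failed: {1}'.format(cmd, result))
    return result['stdout']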
Example #9
def patch_and_assemble_ubuntu_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with newly_builded from review
    environment - Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new ubuntu bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist".format(settings.UPDATE_FUEL))
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.FUEL_AGENT_REPO_PATH.rstrip('/'), pack_path)
            # renew code in bootstrap

            # Step 1 - install squashfs-tools
            cmd = ("yum install -y squashfs-tools")
            result = remote.execute(cmd)
            assert_equal(
                result['exit_code'], 0,
                ('Failed to install squashfs-tools {}').format(result))

            # Step 2 - unpack bootstrap
            bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
            bootstrap_var = "/var/root.squashfs"

            cmd = ("unsquashfs -d /var/root.squashfs {}/root.squashfs"
                   ).format(bootstrap)
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         'Failed to unpack bootstrap {}'.format(result))

            # Step 3 - replace fuel-agent code in unpacked bootstrap
            agent_path = "/usr/lib/python2.7/dist-packages/fuel_agent"
            bootstrap_file = bootstrap + "/root.squashfs"
            cmd = ("rsync -r {2}fuel_agent/* {0}{1}/;"
                   "mv {3} /var/root.squashfs.old;").format(
                       bootstrap_var, agent_path, pack_path, bootstrap_file)

            result = remote.execute(cmd)
            assert_equal(
                result['exit_code'], 0,
                ('Failed to replace fuel-agent code {}').format(result))

            # Step 4 - assemble new bootstrap
            compression = "-comp xz"
            no_progress_bar = "-no-progress"
            no_append = "-noappend"
            image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
                bootstrap_var, bootstrap_file, compression, no_progress_bar,
                no_append)
            result = remote.execute(image_rebuild)
            assert_equal(result['exit_code'], 0,
                         ('Failed to rebuild bootstrap {}').format(result))

            checkers.check_file_exists(remote, bootstrap_file)
    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Example #10
def patch_and_assemble_ubuntu_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with newly_builded from review
    environment - Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist"
                        .format(settings.UPDATE_FUEL))
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_AGENT_REPO_PATH.rstrip('/'),
            target=pack_path)
        # renew code in bootstrap

        # Step 1 - install squashfs-tools
        cmd = "yum install -y squashfs-tools"
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 2 - unpack bootstrap
        bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
        bootstrap_var = "/var/root.squashfs"

        cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
            bootstrap)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 3 - replace fuel-agent code in unpacked bootstrap
        agent_path = "/usr/lib/python2.7/dist-packages/fuel_agent"
        bootstrap_file = bootstrap + "/root.squashfs"
        cmd = ("rsync -r {2}fuel_agent/* {0}{1}/;"
               "mv {3} /var/root.squashfs.old;"
               ).format(bootstrap_var, agent_path, pack_path, bootstrap_file)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 4 - assemble new bootstrap
        compression = "-comp xz"
        no_progress_bar = "-no-progress"
        no_append = "-noappend"
        image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
            bootstrap_var,
            bootstrap_file,
            compression,
            no_progress_bar,
            no_append)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
        with environment.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(remote, bootstrap_file)
    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
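This and several later examples go through SSHManager instead of a `with environment.d_env.get_admin_remote()` block: call sites pass ip=ssh.admin_ip and let the manager own the connection. A schematic of the interface these examples depend on, built on paramiko; the class and method names match the call sites, but the body, credentials, and lack of connection caching here are assumptions, not the fuel-qa code:

import paramiko

class SSHManager(object):
    admin_ip = '10.109.0.2'  # hypothetical admin node address

    def execute_on_remote(self, ip, cmd):
        # Sketch: run `cmd` on `ip`, assert success, and return the same
        # result shape the examples above index into.
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(ip, username='root', password='r00tme')  # assumed
        try:
            _, stdout, stderr = client.exec_command(cmd)
            exit_code = stdout.channel.recv_exit_status()
            out = stdout.read().decode()
            assert exit_code == 0, 'Command {0!r} failed: {1}'.format(
                cmd, stderr.read().decode())
            return {'exit_code': exit_code,
                    'stdout': out.splitlines(),
                    'stdout_str': out.strip()}
        finally:
            client.close()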
Example #11
def inject_nailgun_agent_ubuntu_bootstrap(environment):
    """Inject nailgun agent packet from review into ubuntu bootsrap
    environment - Environment Model object - self.env
    """
    logger.info("Update nailgun-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist"
                        .format(settings.UPDATE_FUEL))
    pack_path = '/var/www/nailgun/nailgun-agent-review/'
    # Step 1 - install squashfs-tools
    cmd = "yum install -y squashfs-tools"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 2 - unpack bootstrap
    bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
    bootstrap_var = "/var/root.squashfs"

    cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
        bootstrap)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 3 - replace nailgun-agent code in unpacked bootstrap
    agent_path = "/usr/bin/nailgun-agent"
    bootstrap_file = bootstrap + "/root.squashfs"
    logger.info('bootstrap file {0}{1}'.format(bootstrap_var, agent_path))
    old_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('Old sum is {0}'.format(old_sum))
    cmd_etc_sync = ('rsync -r {1}etc/* {0}/etc/'.format(
        bootstrap_var, pack_path))
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd_etc_sync)
    cmd = ("rsync -r {1}usr/* {0}/usr/;" "mv {2} "
           "/var/root.squashfs.old;"
           "").format(bootstrap_var, pack_path, bootstrap_file)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    new_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('New sum is {0}'.format(new_sum))
    assert_not_equal(new_sum, old_sum)

    # Step 4 - assemble new bootstrap
    compression = "-comp xz"
    no_progress_bar = "-no-progress"
    no_append = "-noappend"
    image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
        bootstrap_var,
        bootstrap_file,
        compression,
        no_progress_bar,
        no_append)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
    checkers.check_file_exists(ssh.admin_ip, bootstrap_file)
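get_sha_sum() is what proves the rsync actually replaced the agent inside the unpacked squashfs. A plausible implementation over the same SSHManager, shelling out to sha1sum on the admin node (an assumption; the real helper may hash differently):

def get_sha_sum(file_path):
    # Sketch: `sha1sum` prints "<checksum>  <path>"; keep the checksum only.
    ssh = SSHManager()
    result = ssh.execute_on_remote(
        ip=ssh.admin_ip, cmd='sha1sum {0}'.format(file_path))
    return result['stdout_str'].split()[0]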
Example #12
    def fill_root_above_rabbit_disk_free_limit(self):
        """Filling root filesystem on primary controller"""

        logger.info("Free space in root on primary controller - {}".format(
            self.primary_controller_space_on_root))

        logger.info("Need to fill space on root - {}".format(
            self.primary_controller_space_to_filled))

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:
            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile'.format(
                    self.primary_controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile')
Example #13
def patch_and_assemble_ubuntu_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with newly_builded from review
    environment - Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist".format(settings.UPDATE_FUEL))
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        ssh.upload_to_remote(ip=ssh.admin_ip,
                             source=settings.FUEL_AGENT_REPO_PATH.rstrip('/'),
                             target=pack_path)
        # renew code in bootstrap

        # Step 1 - install squashfs-tools
        cmd = "yum install -y squashfs-tools"
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 2 - unpack bootstrap
        bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
        bootstrap_var = "/var/root.squashfs"

        cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
            bootstrap)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 3 - replace fuel-agent code in unpacked bootstrap
        agent_path = "/usr/lib/python2.7/dist-packages/fuel_agent"
        bootstrap_file = bootstrap + "/root.squashfs"
        cmd = ("rsync -r {2}fuel_agent/* {0}{1}/;"
               "mv {3} /var/root.squashfs.old;").format(
                   bootstrap_var, agent_path, pack_path, bootstrap_file)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 4 - assemble new bootstrap
        compression = "-comp xz"
        no_progress_bar = "-no-progress"
        no_append = "-noappend"
        image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
            bootstrap_var, bootstrap_file, compression, no_progress_bar,
            no_append)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
        checkers.check_file_exists(ssh.admin_ip, bootstrap_file)
    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Example #14
    def fill_root_above_rabbit_disk_free_limit(self):
        """Filling root filesystem on primary controller"""

        logger.info("Free space in root on primary controller - {}".format(
            self.primary_controller_space_on_root))

        logger.info("Need to fill space on root - {}".format(
            self.primary_controller_space_to_filled))

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile'.format(
                self.primary_controller_space_to_filled))
        check_file_exists(node['ip'], '/root/bigfile')
Example #15
def inject_nailgun_agent_ubuntu_bootstrap(environment):
    """Inject nailgun agent packet from review into ubuntu bootsrap
    environment - Environment Model object - self.env
    """
    logger.info("Update nailgun-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist".format(settings.UPDATE_FUEL))
    pack_path = '/var/www/nailgun/nailgun-agent-review/'
    # Step 1 - install squashfs-tools
    cmd = "yum install -y squashfs-tools"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 2 - unpack bootstrap
    bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
    bootstrap_var = "/var/root.squashfs"

    cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(bootstrap)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 3 - replace nailgun-agent code in unpacked bootstrap
    agent_path = "/usr/bin/nailgun-agent"
    bootstrap_file = bootstrap + "/root.squashfs"
    logger.info('bootstrap file {0}{1}'.format(bootstrap_var, agent_path))
    old_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('Old sum is {0}'.format(old_sum))
    cmd_etc_sync = ('rsync -r {1}etc/* {0}/etc/'.format(
        bootstrap_var, pack_path))
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd_etc_sync)
    cmd = ("rsync -r {1}usr/* {0}/usr/;"
           "mv {2} "
           "/var/root.squashfs.old;"
           "").format(bootstrap_var, pack_path, bootstrap_file)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    new_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('New sum is {0}'.format(new_sum))
    assert_not_equal(new_sum, old_sum)

    # Step 4 - assemble new bootstrap
    compression = "-comp xz"
    no_progress_bar = "-no-progress"
    no_append = "-noappend"
    image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
        bootstrap_var, bootstrap_file, compression, no_progress_bar, no_append)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
    checkers.check_file_exists(ssh.admin_ip, bootstrap_file)
Example #16
    def upgrade_ha_ceph_for_all_ubuntu_neutron_vlan(self):
        """Upgrade master node ha mode, ceph for all, neutron vlan

        Scenario:
            1. Revert snapshot with ha mode, ceph for all, neutron vlan env
            2. Run upgrade on master
            3. Check that upgrade was successful

        """
        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
            raise SkipTest()

        self.check_run('upgrade_ha_ceph_for_all_ubuntu_neutron_vlan')
        self.env.revert_snapshot("ha_ceph_for_all_ubuntu_neutron_vlan")

        cluster_id = self.fuel_web.get_last_created_cluster()

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote,
                                    hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(
                remote,
                os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
            checkers.untar(remote, os.path.basename(hlp_data.TARBALL_PATH),
                           '/var')
            checkers.run_script(remote,
                                '/var', 'upgrade.sh',
                                password=hlp_data.KEYSTONE_CREDS['password'])
            checkers.wait_upgrade_is_done(remote, 3000,
                                          phrase='*** UPGRADING MASTER NODE'
                                                 ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(remote,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:6])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()

        self.env.make_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan",
                               is_make=True)
Example #17
    def fill_root_above_rabbit_disk_free_limit(self):
        """Filling root filesystem on primary controller"""

        logger.info(
            "Free space in root on primary controller - {}".format(
                self.primary_controller_space_on_root
            ))

        logger.info(
            "Need to fill space on root - {}".format(
                self.primary_controller_space_to_filled
            ))

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:
            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile'.format(
                    self.primary_controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile')
Example #18
    def upgrade_ha_ceph_for_all_ubuntu_neutron_vlan(self):
        """Upgrade master node ha mode, ceph for all, neutron vlan

        Scenario:
            1. Revert snapshot with ha mode, ceph for all, neutron vlan env
            2. Run upgrade on master
            3. Check that upgrade was successful

        """
        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
            raise SkipTest()

        self.check_run('upgrade_ha_ceph_for_all_ubuntu_neutron_vlan')
        self.env.revert_snapshot("ha_ceph_for_all_ubuntu_neutron_vlan")

        cluster_id = self.fuel_web.get_last_created_cluster()

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote, hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(
                remote,
                os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
            checkers.untar(remote, os.path.basename(hlp_data.TARBALL_PATH),
                           '/var')
            checkers.run_script(remote,
                                '/var',
                                'upgrade.sh',
                                password=hlp_data.KEYSTONE_CREDS['password'])
            checkers.wait_upgrade_is_done(remote,
                                          3000,
                                          phrase='*** UPGRADING MASTER NODE'
                                          ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(remote,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:6])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()

        self.env.make_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan",
                               is_make=True)
Example #19
    def octane_action(self, action, path=None):
        assert_true(action in self.OCTANE_COMMANDS,
                    "Unknown octane action '{}', aborting".format(action))
        octane_cli_args = {
            'path': path,
            'pwd': settings.KEYSTONE_CREDS['password']
        }
        if 'backup' in action:
            # pylint: disable=no-member
            assert_false(self.admin_remote.exists(path),
                         'File already exists, not able to reuse')
            # pylint: enable=no-member
        elif 'restore' in action:
            checkers.check_file_exists(self.admin_remote, path)

        run_on_remote(self.admin_remote,
                      self.OCTANE_COMMANDS[action].format(**octane_cli_args))

        if 'backup' in action:
            checkers.check_file_exists(self.admin_remote, path)
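octane_action() dispatches through a class-level OCTANE_COMMANDS table keyed by strings containing 'backup' or 'restore', with {path} and {pwd} placeholders. The docstring in Example #8 confirms 'octane fuel-restore' is the underlying CLI; the exact flags below are illustrative assumptions, not verified octane syntax:

    OCTANE_COMMANDS = {
        # Keys must contain 'backup' or 'restore' for the pre/post checks
        # in octane_action(); the flags shown are illustrative only.
        'backup': 'octane fuel-backup --to {path}',
        'restore': 'octane fuel-restore --from {path} --admin-password {pwd}',
    }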
Example #20
    def fill_root_above_rabbit_disk_free_limit(self):
        """Filling root filesystem on primary controller"""

        logger.info(
            "Free space in root on primary controller - {}".format(
                self.primary_controller_space_on_root
            ))

        logger.info(
            "Need to fill space on root - {}".format(
                self.primary_controller_space_to_filled
            ))

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile'.format(
                self.primary_controller_space_to_filled)
        )
        check_file_exists(node['ip'], '/root/bigfile')
Example #21
def replace_centos_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with re-builded with review code
    environment - Environment Model object - self.env
    """
    logger.info("Updating bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist"
                        .format(settings.UPDATE_FUEL))
    rebuilded_bootstrap = '/var/initramfs.img.updated'
    with environment.d_env.get_admin_remote() as remote:
        checkers.check_file_exists(remote, rebuilded_bootstrap)
    logger.info("Assigning new bootstrap from {}".format(rebuilded_bootstrap))
    bootstrap = "/var/www/nailgun/bootstrap"
    cmd = ("mv {0}/initramfs.img /var/initramfs.img;"
           "cp /var/initramfs.img.updated {0}/initramfs.img;"
           "chmod +r {0}/initramfs.img;").format(bootstrap)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    cmd = "cobbler sync"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
Example #22
def replace_bootstrap(environment):
    """Replaced initramfs.img in /var/www/nailgun/
    with newly_builded from review
    environment - Environment Model object - self.env
    """
    logger.info("Updating bootstrap")
    if not settings.UPDATE_FUEL:
        raise Exception("{} variable don't exist"
                        .format(settings.UPDATE_FUEL))
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        with environment.d_env.get_admin_remote() as remote:
            remote.upload(settings.UPDATE_FUEL_PATH.rstrip('/'),
                          pack_path)
        logger.info("Assigning new bootstrap from {}"
                    .format(pack_path))
        bootstrap = "/var/www/nailgun/bootstrap"
        cmd = ("rm {0}/initramfs.img;"
               "cp {1}/initramfs.img.updated {0}/initramfs.img;"
               "chmod +r {0}/initramfs.img;"
               ).format(bootstrap, pack_path)
        with environment.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(
                remote,
                '{0}initramfs.img.updated'.format(pack_path))
            result = remote.execute(cmd)
            assert_equal(result['exit_code'], 0,
                         ('Failed to assign bootstrap {}'
                          ).format(result))
        cmd = "cobbler sync"
        container = "cobbler"
        environment.base_actions.execute_in_container(
            cmd, container, exit_code=0)
    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Example #23
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        pacemaker_attributes = self.ssh_manager.execute_on_remote(
            ip=node['ip'], cmd='cibadmin --query --scope status')['stdout_str']
        controller_space_on_root = get_pacemaker_nodes_attributes(
            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']

        logger.info("Free space in root on primary controller - {}".format(
            controller_space_on_root))

        controller_space_to_filled = str(
            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1)

        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))

        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile2'.format(
                controller_space_to_filled))
        check_file_exists(node['ip'], '/root/bigfile2')
Example #24
    def upgrade_ha_one_controller_delete_node(self):
        """Upgrade ha 1 controller deployed cluster with ceph and
           delete node from old cluster

        Scenario:
            1. Revert ceph_ha_one_controller_compact snapshot
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Delete one compute+ceph node
            7. Re-deploy cluster
            8. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
        self.env.revert_snapshot('ceph_ha_one_controller_compact')

        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        remote_ceph = self.fuel_web.get_ssh_for_node('slave-03')
        self.fuel_web.prepare_ceph_to_delete(remote_ceph)
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
        try:
            wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                        'Node {0} was not discovered within 5 * 60 s'.format(
                            nodes[0]))
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'],
                               should_fail=1)
        self.env.make_snapshot("upgrade_ha_one_controller_delete_node")
Example #25
    def upgrade_second_stage(self):
        """Upgrade master second time with 2 available clusters

        Scenario:
            1. Revert snapshot upgrade_first_stage
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification on both clusters
            5. Run OSTF on both clusters
            6. Add 1 compute node to both clusters and
               re-deploy them one by one
            7. Run network verification on both clusters
            8. Run OSTF on both clusters

        """
        if not self.env.revert_snapshot('upgrade_first_stage'):
            raise SkipTest()

        remote = self.env.d_env.get_admin_remote()
        remote.execute("rm -rf /var/*upgrade*")

        checkers.upload_tarball(remote,
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(remote,
                                   os.path.basename(hlp_data.TARBALL_PATH),
                                   '/var')
        checkers.untar(remote,
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(remote,
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(remote, 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(remote,
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:6])
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[6:8])

        cluster_ids = [cluster['id']
                       for cluster in self.fuel_web.client.list_clusters()]
        for cluster_id in cluster_ids:
            self.fuel_web.verify_network(cluster_id)
            self.fuel_web.run_ostf(cluster_id=cluster_id)
        first_cluster_id = sorted(cluster_ids)[0]
        second_cluster_id = sorted(cluster_ids)[1]
        logger.debug("first cluster id {0}, second cluster id"
                     " {1}".format(first_cluster_id, second_cluster_id))

        self.fuel_web.update_nodes(
            first_cluster_id, {'slave-07': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(first_cluster_id)
        self.fuel_web.verify_network(first_cluster_id)
        self.fuel_web.run_ostf(cluster_id=first_cluster_id)

        self.fuel_web.update_nodes(
            second_cluster_id, {'slave-08': ['compute', 'ceph-osd']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(second_cluster_id)
        self.fuel_web.verify_network(second_cluster_id)
        self.fuel_web.run_ostf(cluster_id=second_cluster_id)
Example #26
    def upgrade_first_stage(self):
        """Upgrade ha one controller deployed cluster and deploy new one

        Scenario:
            1. Revert snapshot with neutron ha one controller
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Deploy new ceph ha one controller neutron vlan cluster
            7. Run network verification
            8. Run OSTF

        """
        if not self.env.revert_snapshot('prepare_upgrade_env'):
            raise SkipTest()

        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.basename(hlp_data.TARBALL_PATH),
                                   '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [release_id for release_id in available_releases_after
                         if release_id not in available_releases_before]
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:6])
        data = {
            'tenant': 'upgrade_first_stage',
            'user': '******',
            'password': '******',
            'net_provider': 'neutron',
            'net_segment_type': 'vlan',
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False
        }
        cluster_id = self.fuel_web.create_cluster(
            name='first_stage_upgrade',
            mode=hlp_data.DEPLOYMENT_MODE,
            settings=data,
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['compute', 'ceph-osd'],
                'slave-06': ['compute', 'ceph-osd']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_first_stage", is_make=True)
Example #27
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as admin_remote:
            # copy plugin to the master node
            checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
            checkers.upload_tarball(
                admin_remote,
                EXAMPLE_PLUGIN_V3_PATH,
                '/var')
            # install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check if slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-01') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if fuel_plugin_example_v3_puppet called
            # between netconfig and connectivity_tests
            netconfig_str = 'MODULAR: netconfig.pp'
            plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
            connect_str = 'MODULAR: connectivity_tests.pp'
            checkers.check_log_lines_order(remote,
                                           log_file_path='/var/log/puppet.log',
                                           line_matcher=[netconfig_str,
                                                         plugin_str,
                                                         connect_str])

        # check if slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        with self.env.fuel_web.get_ssh_for_node('slave-02') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')

        # check if slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-03') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if service run on slave-03
            logger.debug("Checking service on node {0}".format('slave-03'))

            cmd = 'pgrep -f fuel-simple-service'
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd, res_pgrep['stderr']))
            process_count = len(res_pgrep['stdout'])
            assert_equal(1, process_count,
                         "There should be 1 process 'fuel-simple-service',"
                         " but {0} found {1} processes".format(cmd,
                                                               process_count))

            # curl to service
            cmd_curl = 'curl localhost:8234'
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd_curl, res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3")
Example #28
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_V3_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_V3_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_V3_PATH,
                             tar_target='/var')
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True})

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check if slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check if fuel_plugin_example_v3_puppet called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str, plugin_str, connect_str])

        # check if slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check if slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check if service run on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(ip=slave3['ip'],
                                                       cmd=cmd)
        process_count = len(res_pgrep['stdout'])
        assert_equal(
            1, process_count,
            "There should be 1 process 'fuel-simple-service',"
            " but {0} found {1} processes".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(ip=slave3['ip'], cmd=cmd_curl)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
Example #29
    def build_full_bootstrap(self):
        """Verify than slaves retrieved Ubuntu bootstrap with extra settings

        Scenario:
            1. Revert snapshot ready
            2. Build and activate Ubuntu bootstrap with extra settings
            3. Bootstrap slaves
            4. Verify Ubuntu bootstrap on slaves

        Duration: 20m
        """
        self.env.revert_snapshot("ready")

        with self.env.d_env.get_admin_remote() as remote:
            bootstrap_script = '''\
                #!/bin/bash

                echo "testdata" > /test_bootstrap_script
                apt-get install ipython -y
                '''

            with tempfile.NamedTemporaryFile() as temp_file:
                temp_file.write(textwrap.dedent(bootstrap_script))
                temp_file.flush()
                remote.mkdir("/root/bin")
                remote.upload(temp_file.name, "/root/bin/bootstrap_script.sh")

            remote.mkdir("/root/inject/var/lib/testdir")
            remote.mkdir("/root/inject/var/www/testdir2")

            kernel_cmdline = ["biosdevname=0", "net.ifnames=1", "debug",
                              "ignore_loglevel", "log_buf_len=10M"]

        bootstrap_default_params = \
            self.env.fuel_bootstrap_actions.get_bootstrap_default_config()

        additional_repos = [
            self._get_main_repo(bootstrap_default_params["repos"],
                                "ubuntu", "main"),
            self._get_main_repo(bootstrap_default_params["repos"],
                                "ubuntu", "updates"),
            self._get_main_repo(bootstrap_default_params["repos"],
                                "mos", "main")]

        bootstrap_params = {
            "ubuntu-release": "trusty",
            "repo": ["'deb {0} {1} {2}'".format(repo['uri'],
                                                repo['suite'],
                                                repo['section'])
                     for repo in additional_repos],
            "direct-repo-addr": [self.env.admin_node_ip],
            "script": "/root/bin/bootstrap_script.sh",
            "label": "UbuntuBootstrap",
            "extra-dir": ["/root/inject/"],
            "extend-kopts": "'{0}'".format(" ".join(kernel_cmdline)),
            "kernel-flavor": "linux-generic-lts-saucy-eol-upgrade",
            "output-dir": "/tmp",
            "package": ["fuse", "sshfs"],
        }

        uuid, bootstrap_location = \
            self.env.fuel_bootstrap_actions.build_bootstrap_image(
                **bootstrap_params)
        self.env.fuel_bootstrap_actions.\
            import_bootstrap_image(bootstrap_location)
        self.env.fuel_bootstrap_actions.\
            activate_bootstrap_image(uuid)

        nodes = self.env.d_env.get_nodes(
            name__in=["slave-01", "slave-02", "slave-03"])
        self.env.bootstrap_nodes(nodes)

        for node in nodes:
            with self.fuel_web.get_ssh_for_node(node.name) as slave_remote:
                checkers.verify_bootstrap_on_node(slave_remote,
                                                  os_type="ubuntu",
                                                  uuid=uuid)

                for package in ['ipython', 'fuse', 'sshfs']:
                    package_version = checkers.get_package_versions_from_node(
                        slave_remote, name=package, os_type="Ubuntu")
                    assert_not_equal(package_version, "",
                                     "Package {0} is not installed on slave "
                                     "{1}".format(package, node.name))

                for injected_dir in ["/var/lib/testdir", "/var/www/testdir2"]:
                    checkers.check_file_exists(slave_remote, injected_dir)

                file_content = \
                    slave_remote.execute("cat /test_bootstrap_script")
                assert_equal("".join(file_content["stdout"]).strip(),
                             "testdata")

                actual_kernel_cmdline = "".join(
                    slave_remote.execute("cat /proc/cmdline")["stdout"])

                for kernel_opt in kernel_cmdline:
                    assert_true(kernel_opt in actual_kernel_cmdline,
                                "No {0} option in cmdline: {1}"
                                .format(kernel_opt, actual_kernel_cmdline))
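The bootstrap_params dict above mirrors fuel-bootstrap CLI options. A hypothetical flattening helper shows the assumed mapping; the flag names below are derived from the dict keys, not verified against the real CLI:

def params_to_cli_args(params):
    # assumed mapping: every key becomes a --<key> flag;
    # list values repeat the flag once per element
    args = []
    for key, value in sorted(params.items()):
        values = value if isinstance(value, list) else [value]
        for item in values:
            args.extend(['--{0}'.format(key), str(item)])
    return args

# params_to_cli_args({'label': 'UbuntuBootstrap',
#                     'package': ['fuse', 'sshfs']})
# -> ['--label', 'UbuntuBootstrap',
#     '--package', 'fuse', '--package', 'sshfs']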
Exemplo n.º 30
0
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_V3_PATH',
            plugin_path=EXAMPLE_PLUGIN_V3_PATH
        )

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_V3_PATH,
            tar_target='/var'
        )
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True}
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that fuel_plugin_example_v3_puppet is called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str,
                          plugin_str,
                          connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the service is running on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd
        )
        process_count = len(res_pgrep['stdout'])
        assert_equal(1, process_count,
                     "There should be 1 process 'fuel-simple-service',"
                     " but {0} found {1} processes".format(cmd, process_count))

        # query the service with curl
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd_curl
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
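The pgrep/curl pair above checks both that the plugin's service process exists and that it answers HTTP. A local stand-in for the curl step, using only the standard library (port 8234 is the one hard-coded in the test):

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

def probe_service(url='http://localhost:8234', timeout=5):
    # raises URLError on connection failure, which is what
    # a non-zero curl exit code would signal in the test
    return urlopen(url, timeout=timeout).read()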
Exemplo n.º 31
0
    def rollback_automatically_delete_node(self):
        """Rollback automatically ha one controller deployed cluster
           and delete node from cluster

        Scenario:
            1. Revert snapshot with deployed Neutron GRE 6.1 env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Delete 1 node and re-deploy cluster
            9. Run OSTF

        """
        # (ddmitriev) TODO: change the snapshot name to the actual one
        # when reverting to 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        # upgrade.sh is expected to fail here, exiting with code 255
        # and triggering the automatic rollback
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'cinder']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
        try:
            wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                        'Node {0} was not discovered within '
                        '5 * 60 seconds'.format(nodes[0]))
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'],
                               should_fail=1)

        self.env.make_snapshot("rollback_automatically_delete_node")
Exemplo n.º 32
0
    def upgrade_ha_restart_containers(self):
        """Upgrade ha deployed cluster and restart containers

        Scenario:
            1. Revert snapshot with Neutron GRE HA 6.1 env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run patching and restart containers
            5. Run network verification
            6. Run OSTF
            7. Create new cluster with 1 controller and Neutron VLAN
            8. Deploy cluster
            9. Run OSTF

        """
        # (ddmitriev) TODO: change the snapshot name to the actual one
        # when reverting to 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')

        # Upgrade
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        remote = self.env.d_env.get_admin_remote()

        # Patching
        update_command = 'yum update -y'
        update_result = remote.execute(update_command)
        logger.debug('Result of "{1}" command on master node: '
                     '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Restart containers
        destroy_command = 'dockerctl destroy all'
        destroy_result = remote.execute(destroy_command)
        logger.debug('Result of "{1}" command on master node: '
                     '{0}'.format(destroy_result, destroy_command))
        assert_equal(int(destroy_result['exit_code']), 0,
                     'Destroy containers failed, '
                     'inspect logs for details')

        start_command = 'dockerctl start all'
        start_result = remote.execute(start_command)
        logger.debug('Result of "{1}" command on master node: '
                     '{0}'.format(start_result, start_command))
        assert_equal(int(start_result['exit_code']), 0,
                     'Start containers failed, '
                     'inspect logs for details')
        self.env.docker_actions.wait_for_ready_containers()
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Deploy new cluster
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [release_id for release_id
                         in available_releases_after
                         if release_id not in available_releases_before]

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:7])

        new_cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            release_id=added_release[0],
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': 'vlan'
            }
        )
        self.fuel_web.update_nodes(
            new_cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )
        self.fuel_web.run_network_verify(new_cluster_id)
        self.fuel_web.deploy_cluster_wait(new_cluster_id)
        self.fuel_web.run_ostf(new_cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.fuel_web.run_network_verify(new_cluster_id)

        self.env.make_snapshot("upgrade_ha_restart_containers")
Exemplo n.º 33
0
    def rollback_automatically_ha_one_controller(self):
        """Rollback automatically ha one controller deployed cluster

        Scenario:
            1. Revert snapshot with ha one controller ceph env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Add 1 ceph node and re-deploy cluster
            9. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()

        self.env.revert_snapshot('ceph_ha_one_controller_compact')
        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        # upgrade.sh is expected to fail here, exiting with code 255
        # and triggering the automatic rollback
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['ceph-osd']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("rollback_automatically_ha_one_controller")
Exemplo n.º 34
0
    def rollback_automatically_ha(self):
        """Rollback manually ha deployed cluster

        Scenario:
            1. Revert snapshot with Neutron GRE HA 6.1 env
            2. Add raise exception to openstack.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Add 1 cinder node and re-deploy cluster
            9. Run OSTF

        """
        # (ddmitriev) TODO: change the snapshot name to the actual one
        # when reverting to 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-06': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("rollback_automatically_ha")
Exemplo n.º 35
0
    def upgrade_ha_one_controller(self):
        """Upgrade ha one controller deployed cluster with ceph

        Scenario:
            1. Revert snapshot with ha one controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Add another compute node
            7. Re-deploy cluster
            8. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
        self.env.revert_snapshot('ceph_ha_one_controller_compact')

        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = self.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        create_diagnostic_snapshot(
            self.env, "pass", "upgrade_ha_one_controller")

        self.env.make_snapshot("upgrade_ha_one_controller")
Exemplo n.º 36
0
    def upgrade_fuel_after_rollback(self):
        """Upgrade Fuel after rollback and deploy new cluster

        Scenario:
            1. Revert deploy_neutron_gre snapshot with 6.1 env
            2. Upgrade with rollback
            3. Run OSTF
            4. Run network verification
            5. Upgrade fuel master
            6. Check upgrading was successful
            7. Deploy 6.1 cluster with 3 nodes and neutron vlan
            8. Run OSTF for new cluster
            9. Run network verification
        """
        # (ddmitriev) TODO: change the snapshot name to the actual one
        # when reverting to 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")

        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        remote = self.env.d_env.get_admin_remote
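        # note: this intentionally binds the method itself, so each
        # remote() call below re-acquires the admin connection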

        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(remote(), hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(remote(), os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')

        # Upgrade with rollback
        keystone_pass = hlp_data.KEYSTONE_CREDS['password']
        checkers.run_script(remote(), '/var', 'upgrade.sh',
                            password=keystone_pass, rollback=True,
                            exit_code=255)
        checkers.wait_rollback_is_done(remote(), 3000)
        checkers.check_upgraded_containers(remote(), hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Upgrade fuel master
        checkers.run_script(remote(), '/var', 'upgrade.sh',
                            password=keystone_pass)
        checkers.wait_upgrade_is_done(remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nailgun_upgrade_migration()

        # Deploy new cluster
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [release_id for release_id
                         in available_releases_after
                         if release_id not in available_releases_before]

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:6])

        new_cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            release_id=added_release[0],
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': hlp_data.NEUTRON_SEGMENT['vlan']
            }
        )
        self.fuel_web.update_nodes(
            new_cluster_id, {
                'slave-04': ['controller'],
                'slave-05': ['compute'],
                'slave-06': ['cinder']
            }
        )
        self.fuel_web.run_network_verify(new_cluster_id)
        self.fuel_web.deploy_cluster_wait(new_cluster_id)
        self.fuel_web.run_ostf(new_cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.fuel_web.run_network_verify(new_cluster_id)

        self.env.make_snapshot("upgrade_fuel_after_rollback")
Exemplo n.º 37
0
    def deploy_ha_after_upgrade(self):
        """Upgrade and deploy new ha cluster

        Scenario:
            1. Revert snapshot with ha 1 controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Re-deploy cluster
            7. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
        self.env.revert_snapshot('ceph_ha_one_controller_compact')

        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [release_id for release_id
                         in available_releases_after
                         if release_id not in available_releases_before]
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:9])
        segment_type = hlp_data.NEUTRON_SEGMENT['vlan']
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['controller'],
                'slave-07': ['compute'],
                'slave-08': ['compute'],
                'slave-09': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.verify_network(cluster_id=cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.make_snapshot("deploy_ha_after_upgrade")
Exemplo n.º 38
0
    def upgrade_ha(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with Neutron GRE HA 6.1 env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Create new cluster with 1 controller and Neutron VLAN
            7. Deploy cluster
            8. Run OSTF

        """
        # (ddmitriev) TODO: change the snapshot name to the actual one
        # when reverting to 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [release_id for release_id
                         in available_releases_after
                         if release_id not in available_releases_before]
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:7])
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': 'vlan'
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.make_snapshot("upgrade_ha")
Exemplo n.º 39
0
    def deploy_neutron_tun(self):
        """Deploy cluster in ha mode with 1 controller and Neutron VXLAN

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 2 nodes with compute role
            4. Run network verification
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF

        Duration 35m
        Snapshot deploy_neutron_tun

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        data = {
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT['tun'],
            'tenant': 'simpleTun',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=data
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'cinder'],
                'slave-03': ['compute', 'cinder']
            }
        )
        self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/26',
                                              '192.168.196.1')
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            data['user'], data['password'], data['tenant'])

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        # assert_equal(str(cluster['net_segment_type']), segment_type)
        self.fuel_web.check_fixed_network_cidr(
            cluster_id, os_conn)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(
                remote, '/usr/share/doc/python-fuelclient-8.0.0/'
                        'fuel_client.yaml')
            checkers.check_client_smoke(remote)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_tun")