def create_and_attach_empty_volume(self):
        """Create and attach to instance empty volume."""
        mount_point = '/dev/sdb'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        vol = os_conn.create_volume(availability_zone=self.cinder_az)
        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   security_groups=[sg],
                                   net_id=net['id'],
                                   timeout=210)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        logger.info("Attaching volume via cli")
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        cmd = '. openrc; nova volume-attach {srv_id} {volume_id} {mount}' \
              ''.format(srv_id=vm.id, volume_id=vol.id, mount=mount_point)
        logger.debug('CMD: {}'.format(cmd))
        SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)

        helpers.wait(
            lambda: os_conn.get_volume_status(vol) == "in-use",
            timeout=30, timeout_msg="Volume doesn't reach 'in-use' state")

        vm.reboot()
        sleep(10)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = 'sudo /sbin/fdisk -l | grep {}'.format(mount_point)
            res = remote.execute_through_host(
                hostname=floating_ip.ip,
                cmd=cmd,
                auth=cirros_auth
            )
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "Attached volume is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume(vol)
    def create_instance_with_vmxnet3_adapter(self):
        """Create instance with vmxnet3 adapter."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.image_name)
        os_conn.update_image(image,
                             properties={"hw_vif_model": "VirtualVmxnet3"})
        flavor = os_conn.get_flavor_by_name('m1.small')
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   net_id=net['id'], security_groups=[sg],
                                   flavor_id=flavor.id, timeout=900)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=210,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = '/usr/bin/lshw -class network | grep vmxnet3'
            res = remote.execute_through_host(
                hostname=floating_ip.ip,
                cmd=cmd,
                auth=self.image_creds
            )
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "VMxnet3 driver is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
Example #3
    def boot_instance_on_node(self, hypervisor_name, label, boot_timeout=300,
                              need_floating_ip=True):
        instance = self.os_conn.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name), label=label)
        logger.info("New instance {0} created on {1}"
                    .format(instance.id, hypervisor_name))
        ip = self.os_conn.get_nova_instance_ip(instance, net_name=label,
                                               type='fixed')
        logger.info("Instance {0} has IP {1}".format(instance.id, ip))

        if not need_floating_ip:
            return self.os_conn.nova.servers.get(instance.id)

        ip = self.os_conn.assign_floating_ip(instance)
        logger.info("Floating address {0} associated with instance {1}"
                    .format(ip.ip, instance.id))

        logger.info("Wait for ping from instance {}".format(instance.id))
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(ip.ip, 22),
            timeout=boot_timeout,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".
                         format(instance.id, boot_timeout)))

        return self.os_conn.nova.servers.get(instance.id)
Example #4
    def simple_flat_create_instance(self):
        """Create instance with file injection

         Scenario:
            1. Revert "simple flat" environment
            2. Create instance with file injection
            3. Assert instance was created
            4. Assert file is on instance

        """
        self.env.revert_snapshot("deploy_simple_flat")
        data = {
            'tenant': 'novaSimpleFlat',
            'user': '******',
            'password': '******'
        }
        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os = os_actions.OpenStackActions(controller['ip'], data['user'],
                                         data['password'], data['tenant'])

        remote = self.env.get_ssh_to_remote_by_name('slave-01')
        remote.execute("echo 'Hello World' > /root/test.txt")
        server_files = {"/root/test.txt": 'Hello World'}
        instance = os.create_server_for_migration(file=server_files)
        floating_ip = os.assign_floating_ip(instance)
        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
        res = os.execute_through_host(
            remote,
            floating_ip.ip, "sudo cat /root/test.txt")
        assert_true(res == 'Hello World', 'file content is {0}'.format(res))
Example #5
    def wait_for_slave_provision(node_ip, timeout=10 * 60):
        """Wait for a target node provision.

        :param node_ip: IP address of target node.
        :param timeout: Timeout for wait function.
        """
        wait(lambda: tcp_ping(node_ip, 22), timeout=timeout, timeout_msg="Node doesn't appear in network")
Example #6
    def wait_for_slave_network_down(node_ip, timeout=10 * 20):
        """Wait for a target node network down.

        :param node_ip: IP address of target node.
        :param timeout: Timeout for wait function.
        """
        wait(lambda: (not tcp_ping(node_ip, 22)), interval=1, timeout=timeout, timeout_msg="Node didn't go offline")
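
The wait()/tcp_ping() calls used throughout these snippets follow one poll-until-true pattern: a predicate is called repeatedly until it returns a truthy value or a deadline passes, and a TimeoutError carrying timeout_msg is raised otherwise. A minimal illustrative sketch of that pattern (an assumption for clarity, not the actual devops.helpers implementation; the helper name poll_until is hypothetical):

import time


def poll_until(predicate, timeout=60, interval=5, timeout_msg='Timed out'):
    """Call predicate() every `interval` seconds until it returns a truthy
    value; raise TimeoutError with `timeout_msg` once `timeout` expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError(timeout_msg)
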
def create_access_point(os_conn, nics, security_groups):
        """Create access point.

        Creating instance with floating ip as access point to instances
        with private ip in the same network.

        :param os_conn: type object, openstack
        :param nics: type dictionary, neutron networks
                     to assign to instance
        :param security_groups: A list of security group names
        """
        # get any available host
        host = os_conn.nova.services.list(binary='nova-compute')[0]
        # create access point server
        access_point = create_instances(
            os_conn=os_conn, nics=nics,
            vm_count=1,
            security_groups=security_groups,
            available_hosts=[host]).pop()

        verify_instance_state(os_conn)

        access_point_ip = os_conn.assign_floating_ip(
            access_point, use_neutron=True)['floating_ip_address']
        wait(lambda: tcp_ping(access_point_ip, 22), timeout=60 * 5, interval=5)
        return access_point, access_point_ip
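
A usage sketch for the access-point pattern described above: once the access point has a floating IP, instances that only have private addresses can be reached by jumping through it, e.g. with OpenSSH's ProxyJump. This is an illustrative assumption, not part of the original suite; the helper name, user and key path are hypothetical and depend on the image used.

import subprocess


def run_via_access_point(access_point_ip, private_ip, command,
                         user='cirros', key_file='~/.ssh/test_key'):
    # Jump through the access point (-J) to run a command on an instance
    # that is reachable only via its private address.
    return subprocess.check_output([
        'ssh', '-i', key_file, '-o', 'StrictHostKeyChecking=no',
        '-J', '{0}@{1}'.format(user, access_point_ip),
        '{0}@{1}'.format(user, private_ip), command,
    ])
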
Example #8
    def test_tcp_ping(self, ping):

        host = '127.0.0.1'
        port = 65535
        timeout = 1

        result = helpers.tcp_ping(host, port, timeout)
        ping.assert_called_once_with(host, port, timeout)
        self.assertTrue(result)

        ping.reset_mock()
        ping.side_effect = socket.error

        result = helpers.tcp_ping(host, port, timeout)
        ping.assert_called_once_with(host, port, timeout)
        self.assertFalse(result)
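
For context, a rough sketch of what a tcp_ping helper such as the one mocked in this test typically does: attempt to open a TCP connection and report success as a boolean. This is an assumption for illustration, not the exact devops.helpers code; tcp_ping_sketch is a hypothetical name.

import socket


def tcp_ping_sketch(host, port, timeout=None):
    """Return True if a TCP connection to host:port can be established."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if timeout is not None:
        sock.settimeout(timeout)
    try:
        sock.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        sock.close()
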
Example #9
    def find_nessus_address(self,
                            nessus_net_name='admin',
                            nessus_port=8834):
        admin_net_cidr = \
            self.env.d_env.get_network(name=nessus_net_name).ip_network

        for address in netaddr.IPNetwork(admin_net_cidr).iter_hosts():
            if tcp_ping(address.format(), nessus_port):
                return address.format()
    def wait_for_slave_provision(node_ip, timeout=10 * 60):
        """Wait for a target node provision.

        :param node_ip: IP address of target node.
        :param timeout: Timeout for wait function.
        """
        wait(lambda: tcp_ping(node_ip, 22),
             timeout=timeout,
             timeout_msg="Node doesn't appear in network")
    def wait_for_slave_network_down(node_ip, timeout=10 * 20):
        """Wait for a target node network down.

        :param node_ip: IP address of target node.
        :param timeout: Timeout for wait function.
        """
        wait(lambda: (not tcp_ping(node_ip, 22)),
             interval=1,
             timeout=timeout,
             timeout_msg="Node doesn't gone offline")
    def wait_node_is_offline(self, node_ip, timeout):
        """Wait node is shutdown and doesn't respond

        """
        helpers.wait(
            lambda: not helpers.tcp_ping(node_ip, 22),
            timeout=timeout,
            timeout_msg="Node '{}' didn't go offline after {} sec".format(
                node_ip, timeout
            )
        )
    def test_mysql_is_running(self):
        """Start container from image, check if mysql is running

        Scenario:
            3. Check port 3306

        """
        LOG.info("Trying to reach port 3306")
        helpers.wait(lambda: helpers.tcp_ping('localhost', 33306),
                     timeout=30,
                     timeout_msg="MySQL port in not reacheble.")
    def wait_node_is_online(self, node_ip, timeout):
        """Wait node is online after starting

        """
        helpers.wait(
            lambda: helpers.tcp_ping(node_ip, 22),
            timeout=timeout,
            timeout_msg="Node '{}' didn't become online after {} sec".format(
                node_ip, timeout
            )
        )
Example #16
 def test_puppetmaster_alive(self):
     wait(
         lambda: tcp_ping(self.get_admin_node_ip(), 8140),
         timeout=5
     )
     ps_output = self.remote().execute('ps ax')['stdout']
     pm_processes = filter(
         lambda x: '/usr/sbin/puppetmasterd' in x,
         ps_output
     )
     logging.debug("Found puppet master processes: %s" % pm_processes)
     self.assertEquals(len(pm_processes), 4)
Example #18
 def warm_start_nodes(self, devops_nodes):
     LOGGER.info("Starting nodes " "{0}".format([n.name for n in devops_nodes]))
     for node in devops_nodes:
         node.start()
     for node in devops_nodes:
         ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
         try:
             self.wait_for_slave_provision(ip)
         except TimeoutError:
             asserts.assert_true(
                 tcp_ping(ip, 22), "Node {0} has not become online " "after warm start".format(node.name)
             )
         LOGGER.debug("Node {0} became online.".format(node.name))
 def warm_start_nodes(self, devops_nodes):
     logger.info('Starting nodes '
                 '{0}'.format([n.name for n in devops_nodes]))
     for node in devops_nodes:
         node.start()
     for node in devops_nodes:
         ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
         try:
             self.wait_for_slave_provision(ip)
         except TimeoutError:
             asserts.assert_true(
                 tcp_ping(ip, 22), 'Node {0} has not become online '
                 'after warm start'.format(node.name))
         logger.info('Node {0} became online.'.format(node.name))
Example #20
 def warm_start_nodes(self, devops_nodes):
     logger.info('Starting nodes '
                 '{0}'.format([n.name for n in devops_nodes]))
     for node in devops_nodes:
         node.start()
     for node in devops_nodes:
         ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
         try:
             self.wait_for_slave_provision(ip)
         except TimeoutError:
             asserts.assert_true(
                 tcp_ping(ip, 22),
                 'Node {0} has not become online '
                 'after warm start'.format(node.name))
         logger.info('Node {0} became online.'.format(node.name))
Example #21
    def check_slaves_are_ready(self):
        devops_nodes = [node for node in self.env.d_env.nodes().slaves
                        if node.driver.node_active(node)]

        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            try:
                self.wait_for_slave_provision(ip)
            except TimeoutError:
                asserts.assert_true(
                    tcp_ping(ip, 22),
                    'Node {0} has not become online '
                    'after revert'.format(node.name))
            logger.debug('Node {0} became online.'.format(node.name))
        return True
Example #22
    def find_nessus_address(self,
                            nessus_net_name='admin',
                            nessus_port=8834):
        admin_net_cidr = \
            self.env.d_env.get_network(name=nessus_net_name).ip_network

        logger.info(
            "Scanning network '%s' (%s) for nessus service on port %d",
            nessus_net_name, admin_net_cidr, nessus_port)

        for address in netaddr.IPNetwork(admin_net_cidr).iter_hosts():
            if tcp_ping(address.format(), nessus_port, timeout=1):
                return address.format()

        fail("No running nessus service found!")
    def check_slaves_are_ready(self):
        devops_nodes = [
            node for node in self.env.d_env.nodes().slaves
            if node.driver.node_active(node)
        ]

        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            try:
                self.wait_for_slave_provision(ip)
            except TimeoutError:
                asserts.assert_true(
                    tcp_ping(ip, 22), 'Node {0} has not become online '
                    'after revert'.format(node.name))
            logger.debug('Node {0} became online.'.format(node.name))
        return True
Example #24
def upload_config(obj, config_path, vsrx_ip):
    """Upload and commit configuration for VSRX."""
    commands = [
        'cli', 'configure',
        'load override {0}'.format(config_path.split('/').pop()), 'commit'
    ]
    wait(lambda: tcp_ping(vsrx_ip, 22),
         timeout=60 * 2,
         interval=10,
         timeout_msg="Node {0} is not accessible by SSH.".format(vsrx_ip))
    with obj.env.d_env.get_ssh_to_remote(vsrx_ip) as remote:
        logger.info('Upload template {0}'.format(config_path))
        remote.upload(config_path, '/cf/root')
        for command in commands:
            logger.info('Execute command {0}.'.format(command))
            remote.execute_async(command)
Example #25
    def test_puppet_master_alive(self):
        """Test current installation has correctly working puppet master

        Scenario:
            1. Revert snapshot "empty"
            2. Search for puppet master process on master node

        """
        if OPENSTACK_RELEASE_CENTOS not in OPENSTACK_RELEASE:
            raise SkipTest()
        self.env.revert_snapshot("empty")
        wait(lambda: tcp_ping(self.env.get_admin_node_ip(), 8140), timeout=5)
        ps_output = self.env.get_admin_remote().execute("ps ax")["stdout"]
        pm_processes = filter(lambda x: "/usr/sbin/puppetmasterd" in x, ps_output)
        logging.debug("Found puppet master processes: %s" % pm_processes)
        assert_equal(len(pm_processes), 4)
Example #26
    def start(self):
        """Method for start environment

        """
        if self._env is None:
            raise exceptions.EnvironmentIsNotSet()
        self._env.start()
        LOG.info('Environment "{0}" started'.format(self._env.name))
        for node in self._env.get_nodes(role__in=ext.UNDERLAY_NODE_ROLES):
            LOG.info("Waiting for SSH on node '{}...'".format(node.name))
            timeout = 480
            helpers.wait(
                lambda: helpers.tcp_ping(self.node_ip(node), 22),
                timeout=timeout,
                timeout_msg="Node '{}' didn't open SSH in {} sec".format(
                    node.name, timeout))
        LOG.info('Environment "{0}" ready'.format(self._env.name))
    def start(self):
        """Method for start environment

        """
        if self._env is None:
            raise exceptions.EnvironmentIsNotSet()
        self._env.start()
        for node in self.k8s_nodes:
            LOG.debug("Waiting for SSH on node '{}...'".format(node.name))
            timeout = 360
            helpers.wait(
                lambda: helpers.tcp_ping(self.node_ip(node), 22),
                timeout=timeout,
                timeout_msg="Node '{}' didn't open SSH in {} sec".format(
                    node.name, timeout
                )
            )
Example #28
    def check_ovs_firewall_functionality(self,
                                         cluster_id,
                                         compute_ip,
                                         dpdk=False):
        """Check firewall functionality

        :param cluster_id: int, cluster id
        :param compute_ip: str, compute ip
        :param dpdk: bool, is DPDK enabled
        """
        flows = self.get_flows(compute_ip)
        if dpdk:
            ifaces = self.get_ovs_bridge_ifaces(compute_ip)
        else:
            ifaces = self.get_ifaces(compute_ip)
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        if dpdk:
            server = self.boot_dpdk_instance(os_conn, cluster_id)
            current_ifaces = self.get_ovs_bridge_ifaces(compute_ip)
        else:
            server = os_conn.create_server_for_migration(label=net_name)
            current_ifaces = self.get_ifaces(compute_ip)
        current_flows = self.get_flows(compute_ip)
        assert_equal(
            len(current_ifaces.stdout) - len(ifaces.stdout), 1,
            "Check is failed:"
            " {}\n\n{}".format(ifaces, current_ifaces))
        assert_not_equal(
            set(flows.stdout), set(current_flows.stdout),
            "Check is failed. Passed data is equal:"
            " {}\n\n{}".format(flows, current_flows))
        float_ip = os_conn.assign_floating_ip(server)
        logger.info("Floating address {0} associated with instance {1}".format(
            float_ip.ip, server.id))

        logger.info("Wait for ping from instance {} "
                    "by floating ip".format(server.id))
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(float_ip.ip, 22),
            timeout=300,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".format(
                server.id, 300)))
        os_conn.delete_instance(server)
Example #29
    def start(self, underlay_node_roles, timeout=480):
        """Method for start environment

        """
        if self.__env is None:
            raise exceptions.EnvironmentIsNotSet()
        self.__env.start()
        LOG.info('Environment "{0}" started'.format(self.__env.name))
        for node in self.__env.get_nodes(role__in=underlay_node_roles):
            LOG.info("Waiting for SSH on node '{0}' / {1} ...".format(
                node.name, self.node_ip(node)))
            helpers.wait(
                lambda: helpers.tcp_ping(self.node_ip(node), 22),
                timeout=timeout,
                timeout_msg="Node '{}' didn't open SSH in {} sec".format(
                    node.name, timeout))
        LOG.info('Environment "{0}" ready'.format(self.__env.name))
Example #31
    def warm_shutdown_nodes(self, devops_nodes):
        LOGGER.info("Shutting down (warm) nodes " "{0}".format([n.name for n in devops_nodes]))
        for node in devops_nodes:
            LOGGER.debug("Shutdown node {0}".format(node.name))
            with self.fuel_web.get_ssh_for_node(node.name) as remote:
                remote.execute("/sbin/shutdown -Ph now & exit")

        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            LOGGER.info("Wait a {0} node offline status".format(node.name))
            try:
                self.wait_for_slave_network_down(ip)
            except TimeoutError:
                asserts.assert_false(
                    tcp_ping(ip, 22), "Node {0} has not become " "offline after warm shutdown".format(node.name)
                )
            node.destroy()
    def warm_shutdown_nodes(self, devops_nodes):
        logger.info('Shutting down (warm) nodes '
                    '{0}'.format([n.name for n in devops_nodes]))
        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            logger.debug('Shutdown node {0}'.format(node.name))
            self.ssh_manager.execute(ip, '/sbin/shutdown -Ph now & exit')

        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            logger.info('Wait for node {0} to go offline'.format(node.name))
            try:
                self.wait_for_slave_network_down(ip)
            except TimeoutError:
                asserts.assert_false(
                    tcp_ping(ip, 22), 'Node {0} has not become '
                    'offline after warm shutdown'.format(node.name))
            node.destroy()
    def reset_on_ready_ubuntu_bootstrap(self):
        """Stop reset cluster in HA mode with 1 controller on Ubuntu Bootstrap

        Scenario:
            1. Reset cluster
            2. Verify bootstrap on slaves
            3. Re-deploy cluster
            4. Verify network
            5. Run OSTF

        Duration 30m
        """

        if not self.env.revert_snapshot(
                'deploy_stop_on_deploying_ubuntu_bootstrap'):
            raise SkipTest('Required snapshot not found')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Reset environment,
        # then verify bootstrap on slaves and re-deploy cluster
        self.fuel_web.stop_reset_env_wait(cluster_id)

        nodes = self.env.d_env.get_nodes(
            name__in=["slave-01", "slave-02", "slave-03"])

        self.fuel_web.wait_nodes_get_online_state(nodes, timeout=10 * 60)
        for node in nodes:
            nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
            wait(lambda: tcp_ping(nailgun_node['ip'], 22),
                 timeout=300,
                 timeout_msg=("Node {0} is still unreachable after {1} "
                              "seconds".format(nailgun_node['name'], 300)))
            checkers.verify_bootstrap_on_node(
                nailgun_node['ip'], os_type="ubuntu")

        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Network verification
        self.fuel_web.verify_network(cluster_id)

        # Run ostf
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke'])
    def check_nova_ephemeral_disk(self, os_conn, cluster_id,
                                  hypervisor_name=None, fs_type='ext4'):
        """
        :param os_conn: an object of connection to openstack services
        :param cluster_id: an integer number of cluster id
        :param hypervisor_name: a string of hypervisor name
        :param fs_type: a string of fs type name
        :return:
        """
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        flavor_id = random.randint(10, 10000)
        os_conn.create_flavor(name='ephemeral{0}'.format(flavor_id),
                              ram=64,
                              vcpus=1,
                              disk=1,
                              flavorid=flavor_id,
                              ephemeral=1)

        kwargs = {}
        if hypervisor_name:
            kwargs['availability_zone'] = "nova:{0}".format(hypervisor_name)
        instance = os_conn.create_server_for_migration(
            neutron=True, label=net_name, flavor=flavor_id, **kwargs)

        floating_ip = os_conn.assign_floating_ip(instance)

        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=120,
                     timeout_msg="Can not ping instance by floating "
                                 "ip {0}".format(floating_ip.ip))

        creds = ("cirros", "cubswin:)")
        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])[0]['ip']
        with self.env.d_env.get_ssh_to_remote(controller) as remote:
            res = os_conn.execute_through_host(
                remote, floating_ip.ip, "mount", creds)
            asserts.assert_true('/mnt type {0}'.format(fs_type)
                                in res['stdout'],
                                "Ephemeral disk format was not "
                                "changed on instance. "
                                "Please, see details: {0}".format(res))
        os_conn.delete_instance(instance)
Example #35
    def check_dpdk_instance_connectivity(self,
                                         os_conn,
                                         cluster_id,
                                         mem_page_size='2048'):
        """Boot VM with HugePages and ping it via floating IP

        :param os_conn: an object of connection to openstack services
        :param cluster_id: an integer number of cluster id
        :param mem_page_size: huge pages size
        :return:
        """

        extra_specs = {'hw:mem_page_size': mem_page_size}

        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        flavor_id = random.randint(10, 10000)
        name = 'system_test-{}'.format(random.randint(10, 10000))
        flavor = os_conn.create_flavor(name=name,
                                       ram=64,
                                       vcpus=1,
                                       disk=1,
                                       flavorid=flavor_id,
                                       extra_specs=extra_specs)

        server = os_conn.create_server_for_migration(neutron=True,
                                                     label=net_name,
                                                     flavor=flavor_id)
        os_conn.verify_instance_status(server, 'ACTIVE')

        float_ip = os_conn.assign_floating_ip(server)
        logger.info("Floating address {0} associated with instance {1}".format(
            float_ip.ip, server.id))

        logger.info("Wait for ping from instance {} "
                    "by floating ip".format(server.id))
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(float_ip.ip, 22),
            timeout=300,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".format(
                server.id, 300)))

        os_conn.delete_instance(server)
        os_conn.delete_flavor(flavor)
Example #36
    def reset_on_ready_ubuntu_bootstrap(self):
        """Stop reset cluster in HA mode with 1 controller on Ubuntu Bootstrap

        Scenario:
            1. Reset cluster
            2. Verify bootstrap on slaves
            3. Re-deploy cluster
            4. Verify network
            5. Run OSTF

        Duration 30m
        """

        if not self.env.revert_snapshot(
                'deploy_stop_on_deploying_ubuntu_bootstrap'):
            raise SkipTest('Required snapshot not found')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Reset environment,
        # then verify bootstrap on slaves and re-deploy cluster
        self.fuel_web.stop_reset_env_wait(cluster_id)

        nodes = self.env.d_env.get_nodes(
            name__in=["slave-01", "slave-02", "slave-03"])

        self.fuel_web.wait_nodes_get_online_state(nodes, timeout=10 * 60)
        for node in nodes:
            nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
            wait(lambda: tcp_ping(nailgun_node['ip'], 22),
                 timeout=300,
                 timeout_msg=("Node {0} is still unreachable after {1} "
                              "seconds".format(nailgun_node['name'], 300)))
            checkers.verify_bootstrap_on_node(nailgun_node['ip'],
                                              os_type="ubuntu")

        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Network verification
        self.fuel_web.verify_network(cluster_id)

        # Run ostf
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])
Example #37
    def warm_shutdown_nodes(self, devops_nodes):
        logger.info('Shutting down (warm) nodes '
                    '{0}'.format([n.name for n in devops_nodes]))
        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            logger.debug('Shutdown node {0}'.format(node.name))
            self.ssh_manager.execute(ip, '/sbin/shutdown -Ph now & exit')

        for node in devops_nodes:
            ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
            logger.info('Wait for node {0} to go offline'.format(node.name))
            try:
                self.wait_for_slave_network_down(ip)
            except TimeoutError:
                asserts.assert_false(
                    tcp_ping(ip, 22),
                    'Node {0} has not become '
                    'offline after warm shutdown'.format(node.name))
            node.destroy()
Example #38
    def check_dpdk_instance_connectivity(self, os_conn, cluster_id,
                                         mem_page_size='2048'):
        """Boot VM with HugePages and ping it via floating IP

        :param os_conn: an object of connection to openstack services
        :param cluster_id: an integer number of cluster id
        :param mem_page_size: huge pages size
        :return:
        """

        extra_specs = {
            'hw:mem_page_size': mem_page_size
        }

        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        flavor_id = random.randint(10, 10000)
        name = 'system_test-{}'.format(random.randint(10, 10000))
        flavor = os_conn.create_flavor(name=name, ram=64,
                                       vcpus=1, disk=1,
                                       flavorid=flavor_id,
                                       extra_specs=extra_specs)

        server = os_conn.create_server_for_migration(neutron=True,
                                                     label=net_name,
                                                     flavor=flavor_id)
        os_conn.verify_instance_status(server, 'ACTIVE')

        float_ip = os_conn.assign_floating_ip(server)
        logger.info("Floating address {0} associated with instance {1}"
                    .format(float_ip.ip, server.id))

        logger.info("Wait for ping from instance {} "
                    "by floating ip".format(server.id))
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(float_ip.ip, 22),
            timeout=300,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".
                         format(server.id, 300)))

        os_conn.delete_instance(server)
        os_conn.delete_flavor(flavor)
    def check_instance_creation(self):
        """Create instance and check connection."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        flavor = os_conn.get_flavor_by_name('m1.small')
        if self.image_name:
            image = os_conn.get_image(self.image_name)
        else:
            image = os_conn.get_image(self.vmware_image)
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   net_id=net['id'], security_groups=[sg],
                                   flavor_id=flavor.id, timeout=900)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=210,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
Example #40
    def create_bootable_volume_and_run_instance(self):
        """Create bootable volume and launch instance from it."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.vmware_image)
        vol = os_conn.create_volume(image_id=image.id,
                                    availability_zone=self.cinder_az)
        block_device_mapping = {'vda': vol.id}

        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(availability_zone=self.vcenter_az,
                                   image=False,
                                   net_id=net['id'],
                                   block_device_mapping=block_device_mapping)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume_and_wait(vol)
Example #41
    def create_instance_with_vmxnet3_adapter(self):
        """Create instance with vmxnet3 adapter."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.image_name)
        os_conn.update_image(image,
                             properties={"hw_vif_model": "VirtualVmxnet3"})
        flavor = os_conn.get_flavor_by_name('m1.small')
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   net_id=net['id'],
                                   security_groups=[sg],
                                   flavor_id=flavor.id,
                                   timeout=666)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = '/usr/bin/lshw -class network | grep vmxnet3'
            res = os_conn.execute_through_host(remote,
                                               floating_ip.ip,
                                               cmd,
                                               creds=self.image_creds)
            logger.debug('OUTPUT: {}'.format(res))
            assert_equal(res['exit_code'], 0, "VMxnet3 driver is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
Example #42
    def create_bootable_volume_and_run_instance(self):
        """Create bootable volume and launch instance from it."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.vmware_image)
        vol = os_conn.create_volume(image_id=image.id,
                                    availability_zone=self.cinder_az)
        block_device_mapping = {'vda': vol.id}

        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(availability_zone=self.vcenter_az,
                                   image=False,
                                   net_id=net['id'],
                                   block_device_mapping=block_device_mapping)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume_and_wait(vol)
 def await(self, timeout=1200):
     wait(lambda: tcp_ping(self.get_admin_node_ip(), 22), timeout=timeout)
    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']

        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        logger.info('Disable UMM  on node-{0}'.format(_id))

        change_config(_ip, umm=False, reboot_count=0)

        asserts.assert_false('True' in check_available_mode(_ip),
                             "Maintenance mode should not be available")

        logger.info('Unexpected reboot on node-{0}'.format(_id))

        self.ssh_manager.check_call(ip=_ip, command='reboot >/dev/null & ')

        wait(
            lambda: not checkers.check_ping(self.env.get_admin_node_ip(), _ip),
            timeout=60 * 10,
            timeout_msg='Node {} still responds to ping'.format(
                dregular_ctrl.name))

        # The node doesn't have enough time to set offline status
        # after the reboot, so just wait.

        asserts.assert_true(
            checkers.check_ping(self.env.get_admin_node_ip(),
                                _ip,
                                deadline=600),
            "Host {0} is not reachable by ping during 600 sec".format(_ip))

        self.fuel_web.wait_node_is_online(dregular_ctrl)

        logger.info('Check that node-{0} not in maintenance mode after'
                    ' unexpected reboot'.format(_id))

        wait(lambda: tcp_ping(_ip, 22),
             timeout=60 * 10,
             timeout_msg='Node {} still is not available by SSH'.format(
                 dregular_ctrl.name))

        asserts.assert_false('True' in check_auto_mode(_ip),
                             "Maintenance mode should not switched")

        # Wait until MySQL Galera is UP on some controller
        self.fuel_web.wait_mysql_galera_is_up([dregular_ctrl.name])

        # Wait until Cinder services UP on a controller
        self.fuel_web.wait_cinder_is_up([dregular_ctrl.name])

        # Wait until RabbitMQ cluster is UP
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['ha'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'RabbitMQ availability')),
                  timeout=1500)
        logger.info('RabbitMQ cluster is available')

        # TODO(astudenov): add timeout_msg
        wait_pass(lambda: self.fuel_web.run_single_ostf_test(
            cluster_id,
            test_sets=['sanity'],
            test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'Check that required services are running')),
                  timeout=1500)
        logger.info("Required services are running")

        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 600 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['smoke', 'sanity', 'ha'])
Example #45
    def deploy_neutron_ip_v6(self):
        """Check IPv6 only functionality for Neutron VLAN

        Test disabled and moved to fuel_tests suite:
            fuel_tests.test.test_neutron_ipv6.TestNeutronIPv6

        Scenario:
            1. Revert deploy_neutron_vlan snapshot
            2. Create network resources: two dualstack network IPv6 subnets
                (should be in SLAAC mode,
                address space should not intersect),
                virtual router and set gateway.
            3. Create a Security Group,
                that allows SSH and ICMP for both IPv4 and IPv6.
            4. Launch two instances, one for each network.
            5. Attach Floating IP for both instances.
            6. SSH to the main instance and ping6 another instance.

        Duration 10m
        Snapshot deploy_neutron_ip_v6

        """
        # pylint: disable=W0101
        warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
        raise SkipTest("Test disabled and move to fuel_tests suite")

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("deploy_neutron_vlan")

        cluster_id = self.fuel_web.get_last_created_cluster()
        public_vip = self.fuel_web.get_public_vip(cluster_id)
        logger.info('Public vip is %s', public_vip)

        os_conn = os_actions.OpenStackActions(controller_ip=public_vip,
                                              user='******',
                                              passwd='simpleVlan',
                                              tenant='simpleVlan')

        tenant = os_conn.get_tenant('simpleVlan')

        self.show_step(2)
        net1, net2 = os_conn.create_network_resources_for_ipv6_test(tenant)

        self.show_step(3)
        security_group = os_conn.create_sec_group_for_ssh()

        self.show_step(4)
        instance1 = os_conn.create_server(
            name='instance1',
            security_groups=[security_group],
            net_id=net1['id'],
        )

        instance2 = os_conn.create_server(
            name='instance2',
            security_groups=[security_group],
            net_id=net2['id'],
        )

        self.show_step(5)
        floating_ip = os_conn.assign_floating_ip(instance1)
        floating_ip2 = os_conn.assign_floating_ip(instance2)

        self.show_step(6)
        get_instance_ipv6(instance1, net1)
        instance2_ipv6 = get_instance_ipv6(instance2, net2)

        node_ip = self.fuel_web.get_node_ip_by_devops_name("slave-01")
        remote = ssh_manager.get_remote(node_ip)
        for instance_ip, instance in ((floating_ip.ip, instance1),
                                      (floating_ip2.ip, instance2)):
            logger.info("Wait for ping from instance {} "
                        "by floating ip".format(instance.id))
            devops_helpers.wait(
                lambda: devops_helpers.tcp_ping(instance_ip, 22),
                timeout=300,
                timeout_msg=(
                    "Instance {0} is unreachable for {1} seconds".format(
                        instance.id, 300)))

        ping6_from_instance(remote, floating_ip.ip, instance2_ipv6)

        self.env.make_snapshot('deploy_neutron_ip_v6')
Example #46
    def check_rh_warm_reboot(self):
        """Check that resumed VM is working properly after warm reboot of
        RH-based compute

        Scenario:
            1. Revert environment with RH-compute.
            2. Check that services are ready.
            3. Boot VM on compute and check its connectivity via floating ip.
            4. Warm reboot RH-based compute.
            5. Verify VM connectivity via floating ip after successful reboot
            and VM resume action.

        Duration 20m
        Snapshot check_rh_warm_reboot
        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot('ready_ha_one_controller_with_rh_compute',
                                 skip_timesync=True,
                                 skip_slaves_check=True)
        self.check_slaves_are_ready()
        logger.debug('All slaves online.')

        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.debug('Cluster up and ready.')

        self.show_step(3)
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=('controller', ))
        asserts.assert_equal(
            len(controllers), 1,
            'Environment does not have 1 controller node, '
            'found {} nodes!'.format(len(controllers)))
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        target_node = self.fuel_web.get_devops_node_by_nailgun_node(compute)
        net_label = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        vm = os_conn.create_server_for_migration(neutron=True, label=net_label)
        vm_floating_ip = os_conn.assign_floating_ip(vm)
        logger.info('Trying to get vm via tcp.')
        try:
            wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
        except TimeoutError:
            raise TimeoutError('Can not ping instance'
                               ' by floating ip {0}'.format(vm_floating_ip.ip))
        logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip))
        self.show_step(4)
        self.warm_restart_nodes([target_node])
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.info('All cluster services up and '
                    'running after compute reboot.')

        self.show_step(5)
        asserts.assert_equal(
            os_conn.get_instance_detail(vm).status, "ACTIVE",
            "Instance did not reach active state after compute back online, "
            "current state is {0}".format(
                os_conn.get_instance_detail(vm).status))
        logger.info('Spawned VM is ACTIVE. Trying to '
                    'access it via ip: {0}'.format(vm_floating_ip.ip))
        try:
            wait(lambda: tcp_ping(vm_floating_ip.ip, 22), timeout=120)
        except TimeoutError:
            raise TimeoutError('Can not ping instance'
                               ' by floating ip {0}'.format(vm_floating_ip.ip))
        logger.info('VM is accessible. Deleting it.')
        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
Example #47
    def __init__(self, os_image=None):
        """Constructor for create environment."""
        self.os_image = os_image or settings.OS_IMAGE
        super(PuppetEnvironment, self).__init__(self.os_image)
        self.environment = super(PuppetEnvironment, self).d_env
        self.start_env()

    @property
    def env_name(self):
        return os.environ.get('PPENV_NAME', 'pp-integration')

    def start_env(self):
        self.d_env.start(self.d_env.nodes())

    def execute_cmd(self, command, debug=True):
        """Execute command on node."""
        return self.d_env.get_admin_remote().execute(
            command, verbose=debug)['exit_code']

    def await(self, timeout=1200):
        wait(
            lambda: tcp_ping(self.get_admin_node_ip(), 22), timeout=timeout)


if __name__ == "__main__":
    env = PuppetEnvironment(
        '/var/lib/libvirt/images/ubuntu-12.04.1-server-amd64-p2.qcow2')
    env.await()
    env.make_snapshot(snapshot_name="test1")
    env.execute_cmd('apt-get install mc')
Example #48
    def remote(self, network_name, login, password=None, private_keys=None):
        """
        :rtype : SSHClient
        """
        return SSHClient(
            self.get_ip_address_by_network_name(network_name),
            username=login,
            password=password, private_keys=private_keys)

    def send_keys(self, keys):
        self.driver.node_send_keys(self, keys)

    def await(self, network_name, timeout=120):
        wait(
            lambda: tcp_ping(
                self.get_ip_address_by_network_name(network_name), 22),
            timeout=timeout)

    def define(self):
        self.driver.node_define(self)
        self.save()

    def start(self):
        self.create(verbose=False)

    def create(self, verbose=True):
        if verbose or not self.driver.node_active(self):
            self.driver.node_create(self)

    def destroy(self, verbose=True):
        if verbose or self.driver.node_active(self):
Example #49
 def verify_vms_connection(ironic_conn):
     srv_list = ironic_conn.get_servers()
     for srv in srv_list:
         wait(lambda: tcp_ping(srv.networks['baremetal'][0], 22),
              timeout=60 * 10,
              timeout_msg='Failed to connect to port 22')
Example #50
    def migrate_vm_backed_with_ceph(self):
        """Check VM backed with ceph migration in ha mode with 1 controller

        Scenario:
            1. Create cluster
            2. Add 1 node with controller and ceph OSD roles
            3. Add 2 nodes with compute and ceph OSD roles
            4. Deploy the cluster
            5. Check ceph status
            6. Run OSTF
            7. Create a new VM, assign floating ip
            8. Migrate VM
            9. Check cluster and server state after migration
            10. Terminate VM
            11. Check that DHCP lease is not offered for MAC of deleted VM
            12. Create a new VM for migration, assign floating ip
            13. Create a volume and attach it to the VM
            14. Create filesystem on the new volume and mount it to the VM
            15. Migrate VM
            16. Mount the volume after migration
            17. Check cluster and server state after migration
            18. Terminate VM

        Duration 35m
        Snapshot vm_backed_with_ceph_live_migration
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False,
            }
        )

        self.show_step(2)
        self.show_step(3)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute', 'ceph-osd']
            }
        )
        creds = ("cirros", "test")

        self.show_step(4)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        def _check():
            # Run the volume test several times in the hope that it passes
            test_path = ostf_test_mapping.OSTF_TEST_MAPPING.get(
                'Create volume and attach it to instance')
            logger.debug('Start to run test {0}'.format(test_path))
            self.fuel_web.run_single_ostf_test(
                cluster_id, test_sets=['smoke'],
                test_name=test_path)

        self.show_step(5)
        try:
            _check()
        except AssertionError:
            logger.debug(AssertionError)
            logger.debug("Test failed from first probe,"
                         " we sleep 60 second try one more time "
                         "and if it fails again - test will fails ")
            time.sleep(60)
            _check()

        self.show_step(6)

        # Run ostf
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(7)

        # Create new server
        os = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']

        logger.info("Create new server")
        srv = os.create_server_for_migration(
            neutron=True,
            scenario='./fuelweb_test/helpers/instance_initial_scenario',
            label=net_name)
        logger.info("Srv is currently in status: {:s}".format(srv.status))

        # Prepare to DHCP leases checks
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        srv_instance_ip = os.get_nova_instance_ip(srv, net_name=net_name)
        srv_host_name = self.fuel_web.find_devops_node_by_nailgun_fqdn(
            os.get_srv_hypervisor_name(srv),
            self.env.d_env.nodes().slaves[:3]).name
        net_id = os.get_network(net_name)['id']
        ports = os.get_neutron_dhcp_ports(net_id)
        dhcp_server_ip = ports[0]['fixed_ips'][0]['ip_address']
        with self.fuel_web.get_ssh_for_node(srv_host_name) as srv_remote_node:
            srv_instance_mac = os.get_instance_mac(srv_remote_node, srv)

        logger.info("Assigning floating ip to server")
        floating_ip = os.assign_floating_ip(srv)
        srv_host = os.get_srv_host_name(srv)
        logger.info("Server is on host {:s}".format(srv_host))

        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)

        def ssh_ready(remote, ip, creds):
            try:
                os.execute_through_host(remote, ip, '/bin/true', creds)
                return True
            except paramiko.AuthenticationException:
                logger.info("Authentication failed. Trying again in a minute.")
                time.sleep(60)
                return False

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            wait(lambda: ssh_ready(remote, floating_ip.ip, creds), timeout=300)
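            # /home/test_file is presumably created by the
            # instance_initial_scenario user-data; its checksum is taken before
            # and after live migration to verify the disk contents survive.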
            md5before = os.get_md5sum(
                "/home/test_file", remote, floating_ip.ip, creds)

        self.show_step(8)

        logger.info("Get available computes")
        avail_hosts = os.get_hosts_for_migr(srv_host)

        logger.info("Migrating server")
        new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200)
        logger.info("Check cluster and server state after migration")

        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            md5after = os.get_md5sum(
                "/home/test_file", remote, floating_ip.ip, creds)

        assert_true(
            md5after == md5before,
            "Md5 checksums don't match. "
            "Before migration md5 was: {bef}. "
            "Now it equals: {aft}.".format(bef=md5before, aft=md5after))

        self.show_step(9)

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res = os.execute_through_host(
                remote, floating_ip.ip,
                "ping -q -c3 -w10 {0} | grep 'received' |"
                " grep -v '0 packets received'"
                .format(settings.PUBLIC_TEST_IP), creds)
        logger.info("Ping {0} result on vm is: {1}"
                    .format(settings.PUBLIC_TEST_IP, res['stdout']))

        logger.info("Check Ceph health is ok after migration")
        self.fuel_web.check_ceph_status(cluster_id)

        logger.info(
            "Server is now on host {:s}".format(os.get_srv_host_name(new_srv)))

        self.show_step(10)

        logger.info("Terminate migrated server")
        os.delete_instance(new_srv)
        os.verify_srv_deleted(new_srv)

        self.show_step(11)
        # Check if the dhcp lease for instance still remains
        # on the previous compute node. Related Bug: #1391010
        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            dhcp_port_tag = ovs_get_tag_by_port(remote, ports[0]['id'])
            assert_false(checkers.check_neutron_dhcp_lease(_ip,
                                                           srv_instance_ip,
                                                           srv_instance_mac,
                                                           dhcp_server_ip,
                                                           dhcp_port_tag),
                         "Instance has been deleted, but its DHCP lease "
                         "for IP:{0} with MAC:{1} is still offered by the "
                         "Neutron DHCP agent.".format(srv_instance_ip,
                                                      srv_instance_mac))
        self.show_step(12)
        # Create a new server
        logger.info("Create a new server for migration with volume")
        srv = os.create_server_for_migration(
            neutron=True,
            scenario='./fuelweb_test/helpers/instance_initial_scenario',
            label=net_name)
        logger.info("Srv is currently in status: {:s}".format(srv.status))

        logger.info("Assigning floating ip to server")
        floating_ip = os.assign_floating_ip(srv)
        srv_host = os.get_srv_host_name(srv)
        logger.info("Server is on host {:s}".format(srv_host))

        self.show_step(13)
        logger.info("Create volume")
        vol = os.create_volume()
        logger.info("Attach volume to server")
        os.attach_volume(vol, srv)
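        # Inside the guest the attached Cinder volume is expected to show up
        # as /dev/vdb (virtio); that device is used by mount_volume.sh and the
        # re-mount after migration below.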

        self.show_step(14)
        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
        logger.info("Create filesystem and mount volume")

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            wait(lambda: ssh_ready(remote, floating_ip.ip, creds), timeout=300)

            os.execute_through_host(
                remote,
                floating_ip.ip, 'sudo sh /home/mount_volume.sh', creds)

            os.execute_through_host(
                remote,
                floating_ip.ip, 'sudo touch /mnt/file-on-volume', creds)

        self.show_step(15)
        logger.info("Get available computes")
        avail_hosts = os.get_hosts_for_migr(srv_host)

        logger.info("Migrating server")
        new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120)

        logger.info("Check cluster and server state after migration")
        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)

        self.show_step(16)
        logger.info("Mount volume after migration")
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            out = os.execute_through_host(
                remote,
                floating_ip.ip, 'sudo mount /dev/vdb /mnt', creds)

        logger.info("Output of mounting volume is: {:s}".format(out['stdout']))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            out = os.execute_through_host(
                remote,
                floating_ip.ip, "sudo ls /mnt", creds)
        assert_true("file-on-volume" in out['stdout'],
                    "File is absent in /mnt")

        self.show_step(17)
        logger.info("Check Ceph health is ok after migration")
        self.fuel_web.check_ceph_status(cluster_id)

        logger.info(
            "Server is now on host {:s}".format(os.get_srv_host_name(new_srv)))

        self.show_step(18)
        logger.info("Terminate migrated server")
        os.delete_instance(new_srv)
        os.verify_srv_deleted(new_srv)

        self.env.make_snapshot(
            "vm_backed_with_ceph_live_migration")
    def compute_stop_reinstallation(self):
        """Verify stop reinstallation of compute.

        Scenario:
            1. Revert the snapshot
            2. Create an OS volume and OS instance
            3. Mark 'cinder' and 'vm' partitions to be preserved
            4. Stop reinstallation process of compute
            5. Start the reinstallation process again
            6. Run network verification
            7. Run OSTF
            8. Verify that the volume is present and has 'available' status
               after the node reinstallation
            9. Verify that the VM is available and pingable
               after the node reinstallation

        Duration: 115m

        """
        self.env.revert_snapshot("node_reinstallation_env")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Create an OS volume
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        volume = os_conn.create_volume()

        # Create an OS instance
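        # Pin the instance to a specific hypervisor so the same compute node
        # can be targeted for the stop/restart of reinstallation below.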
        cmp_host = os_conn.get_hypervisors()[0]

        net_label = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']

        vm = os_conn.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(cmp_host.hypervisor_hostname),
            label=net_label)
        vm_floating_ip = os_conn.assign_floating_ip(vm)
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(vm_floating_ip.ip, 22),
            timeout=120)

        cmp_nailgun = self.fuel_web.get_nailgun_node_by_fqdn(
            cmp_host.hypervisor_hostname)

        # Mark 'cinder' and 'vm' partitions to be preserved
        with self.env.d_env.get_admin_remote() as remote:
            PartitionPreservation._preserve_partition(remote,
                                                      cmp_nailgun['id'],
                                                      "cinder")
            PartitionPreservation._preserve_partition(remote,
                                                      cmp_nailgun['id'], "vm")

        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            slave_nodes)

        logger.info('Stop reinstallation process')
        self._stop_reinstallation(self.fuel_web, cluster_id,
                                  [str(cmp_nailgun['id'])], devops_nodes)

        self.fuel_web.verify_network(cluster_id)
        logger.info('Start the reinstallation process again')
        NodeReinstallationEnv._reinstall_nodes(self.fuel_web, cluster_id,
                                               [str(cmp_nailgun['id'])])

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        # Verify that the created volume is still available
        try:
            volume = os_conn.cinder.volumes.get(volume.id)
        except NotFound:
            raise AssertionError(
                "{0} volume is not available after its {1} hosting node "
                "reinstallation".format(volume.id, cmp_nailgun['fqdn']))
        expected_status = "available"
        assert_equal(
            expected_status, volume.status,
            "{0} volume status is {1} after its {2} hosting node "
            "reinstallation. Expected status is {3}.".format(
                volume.id, volume.status, cmp_nailgun['fqdn'],
                expected_status))

        # Verify that the VM is still available
        try:
            os_conn.verify_instance_status(vm, 'ACTIVE')
        except AssertionError:
            raise AssertionError(
                "{0} VM is not available after its {1} hosting node "
                "reinstallation".format(vm.name, cmp_host.hypervisor_hostname))
        assert_true(
            devops_helpers.tcp_ping(vm_floating_ip.ip, 22),
            "{0} VM is not accessible via its {1} floating "
            "ip".format(vm.name, vm_floating_ip))
    def check_ol_hard_reboot(self):
        """Resume VM after hard reboot of OL-based compute

        Scenario:
            1. Revert environment with OL-compute.
            2. Check that services are ready.
            3. Boot VM on compute and check its connectivity via floating ip.
            4. Hard reboot OL-based compute.
            5. Verify VM connectivity via floating ip after successful reboot
               and VM resume action.

        Duration: 20m
        Snapshot: check_ol_hard_reboot
        """

        self.show_step(1)
        self.env.revert_snapshot('ready_ha_one_controller_with_ol_compute',
                                 skip_timesync=True,
                                 skip_slaves_check=True)
        self.check_slaves_are_ready()
        logger.debug('All slaves online.')

        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.debug('Cluster up and ready.')

        self.show_step(3)
        cluster_id = self.fuel_web.get_last_created_cluster()
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=('controller', ))
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        asserts.assert_equal(
            len(controllers), 1,
            'Environment does not have 1 controller node, '
            'found {} nodes!'.format(len(controllers)))
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        target_node = self.fuel_web.get_devops_node_by_nailgun_node(compute)
        target_node_ip = self.fuel_web.get_node_ip_by_devops_name(
            target_node.name)
        net_label = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        vm = os_conn.create_server_for_migration(neutron=True, label=net_label)
        vm_floating_ip = os_conn.assign_floating_ip(vm)
        logger.info('Trying to get vm via tcp.')
        wait(lambda: tcp_ping(vm_floating_ip.ip, 22),
             timeout=120,
             timeout_msg='Can not ping instance '
             'by floating ip {0}'.format(vm_floating_ip.ip))
        logger.info('VM is accessible via ip: {0}'.format(vm_floating_ip.ip))
        self.show_step(4)
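        # Simulate a hard reboot by powering the devops (libvirt) node off
        # and back on.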
        target_node.destroy()
        asserts.assert_false(target_node.driver.node_active(node=target_node),
                             'Target node still active')
        target_node.start()
        asserts.assert_true(target_node.driver.node_active(node=target_node),
                            'Target node did not start')
        self.wait_for_slave_provision(target_node_ip)
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
        logger.info('All cluster services up and '
                    'running after compute hard reboot.')

        self.show_step(5)
        asserts.assert_equal(
            os_conn.get_instance_detail(vm).status, "ACTIVE",
            "Instance did not reach active state after compute back online, "
            "current state is {0}".format(
                os_conn.get_instance_detail(vm).status))
        logger.info('Spawned VM is ACTIVE. Trying to '
                    'access it via ip: {0}'.format(vm_floating_ip.ip))
        wait(lambda: tcp_ping(vm_floating_ip.ip, 22),
             timeout=120,
             timeout_msg='Can not ping instance '
             'by floating ip {0}'.format(vm_floating_ip.ip))
        logger.info('VM is accessible. Deleting it.')
        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)

        self.env.make_snapshot("check_ol_hard_reboot")
    def negative_auto_cic_maintenance_mode(self):
        """Check negative scenario for auto maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Change UMM.CONF
            4. Unexpected reboot
            5. Check that the controller does not switch to maintenance mode
            6. Check that the controller becomes available

        Duration 85m
        """
        self.env.revert_snapshot("cic_maintenance_mode")

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(cluster_id, ["controller"])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            _ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)["ip"]
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true("True" in check_available_mode(remote), "Maintenance mode is not available")

                logger.info("Change UMM.CONF on node %s", devops_node.name)
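                # UMM=yes keeps unified maintenance mode enabled in the config;
                # REBOOT_COUNT and COUNTER_RESET_TIME control how many
                # unexpected reboots within the reset window trigger automatic
                # maintenance mode (assumed semantics of umm.conf).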
                command1 = "echo -e 'UMM=yes\nREBOOT_COUNT=0\n" "COUNTER_RESET_TIME=10' > /etc/umm.conf"

                result = remote.execute(command1)
                assert_equal(
                    result["exit_code"], 0, 'Failed to execute "{0}" on remote host: {1}'.format(command1, result)
                )

                result = remote.execute("umm disable")
                assert_equal(
                    result["exit_code"], 0, 'Failed to execute "{0}" on remote host: {1}'.format("umm disable", result)
                )

                assert_false("True" in check_available_mode(remote), "Maintenance mode should not be available")

                logger.info("Unexpected reboot on node %s", devops_node.name)
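                # Background the forced reboot and discard its output so the
                # SSH channel does not hang when the node goes down mid-command.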
                command2 = "reboot --force >/dev/null & "
                result = remote.execute(command2)
                assert_equal(
                    result["exit_code"], 0, 'Failed to execute "{0}" on remote host: {1}'.format(command2, result)
                )
                wait(lambda: not tcp_ping(_ip, 22), timeout=60 * 10)

            # The node doesn't have enough time to report an offline status
            # after 'reboot --force', so just wait until SSH comes back.

            wait(lambda: tcp_ping(_ip, 22), timeout=60 * 10)

            logger.info("Wait for node %s to come back online after the "
                        "unexpected reboot", devops_node.name)
            self.fuel_web.wait_nodes_get_online_state([devops_node])

            logger.info("Check that node %s is not in maintenance mode after "
                        "the unexpected reboot", devops_node.name)

            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_false("True" in check_auto_mode(remote), "Maintenance mode should not have been switched on")

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up([n.name for n in d_ctrls])

            # Wait until Cinder services UP on a controller
            self.fuel_web.wait_cinder_is_up([n.name for n in d_ctrls])

            _wait(
                lambda: self.fuel_web.run_single_ostf_test(
                    cluster_id,
                    test_sets=["sanity"],
                    test_name=map_ostf.OSTF_TEST_MAPPING.get("Check that required services are running"),
                ),
                timeout=1500,
            )
            logger.debug("Required services are running")

            _wait(lambda: self.fuel_web.run_ostf(cluster_id, test_sets=["ha"]), timeout=1500)
            logger.debug("HA tests pass now")

            try:
                self.fuel_web.run_ostf(cluster_id, test_sets=["smoke", "sanity"])
            except AssertionError:
                logger.debug(
                    "Test failed on the first attempt; "
                    "sleeping 600 seconds and trying once more. "
                    "If it fails again, the test fails."
                )
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id, test_sets=["smoke", "sanity"])
    def manual_cic_maintenance_mode(self):
        """Check manual maintenance mode for controller

        Scenario:
            1. Revert snapshot
            2. Switch in maintenance mode
            3. Wait until controller is rebooting
            4. Exit maintenance mode
            5. Check that the controller becomes available

        Duration 155m
        """
        self.env.revert_snapshot("cic_maintenance_mode")

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(cluster_id, ["controller"])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true("True" in check_available_mode(remote), "Maintenance mode is not available")

                logger.info("Maintenance mode for node %s", devops_node.name)
                result = remote.execute("umm on")
                assert_equal(
                    result["exit_code"], 0, 'Failed to execute "{0}" on remote host: {1}'.format("umm on", result)
                )
            logger.info("Wait for node %s to go offline after switching to "
                        "maintenance mode", devops_node.name)
            try:
                wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(devops_node)["online"], timeout=60 * 10)
            except TimeoutError:
                assert_false(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)["online"],
                    "Node {0} has not become offline after switching to "
                    "maintenance mode".format(devops_node.name),
                )

            logger.info("Check that node %s is in maintenance mode after "
                        "switching", devops_node.name)

            _ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)["ip"]
            wait(lambda: tcp_ping(_ip, 22), timeout=60 * 10)
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                assert_true("True" in check_auto_mode(remote), "Maintenance mode was not switched on")

                result = remote.execute("umm off")
                assert_equal(
                    result["exit_code"], 0, 'Failed to execute "{0}" on remote host: {1}'.format("umm off", result)
                )

            logger.info("Wait for node %s to come back online", devops_node.name)
            try:
                wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(devops_node)["online"], timeout=60 * 10)
            except TimeoutError:
                assert_true(
                    self.fuel_web.get_nailgun_node_by_devops_node(devops_node)["online"],
                    "Node {0} has not become online after " "exiting maintenance mode".format(devops_node.name),
                )

            # Wait until MySQL Galera is UP on some controller
            self.fuel_web.wait_mysql_galera_is_up([n.name for n in d_ctrls])

            # Wait until Cinder services UP on a controller
            self.fuel_web.wait_cinder_is_up([n.name for n in d_ctrls])

            _wait(
                lambda: self.fuel_web.run_single_ostf_test(
                    cluster_id,
                    test_sets=["sanity"],
                    test_name=map_ostf.OSTF_TEST_MAPPING.get("Check that required services are running"),
                ),
                timeout=1500,
            )
            logger.debug("Required services are running")

            _wait(lambda: self.fuel_web.run_ostf(cluster_id, test_sets=["ha"]), timeout=1500)
            logger.debug("HA tests pass now")

            try:
                self.fuel_web.run_ostf(cluster_id, test_sets=["smoke", "sanity"])
            except AssertionError:
                logger.debug(
                    "Test failed on the first attempt; "
                    "sleeping 600 seconds and trying once more. "
                    "If it fails again, the test fails."
                )
                time.sleep(600)
                self.fuel_web.run_ostf(cluster_id, test_sets=["smoke", "sanity"])
    def contrail_public_connectivity_from_instance_without_fip(self):
        """Check network connectivity from instance without floating IP.

        Scenario:
            1. Setup systest_setup.
            2. Launch an instance using the default image, flavor and
               security group.
            3. Check that public IP 8.8.8.8 can be pinged from instance.
            4. Delete instance.

        Duration 5 min

        """
        self.show_step(1)
        self.env.revert_snapshot('systest_setup')
        net_name = 'admin_internal_net'
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        self.show_step(2)

        # Launch instance as access point
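        # goodbye_security() presumably relaxes the default security group
        # rules (ICMP/SSH) so the instances are reachable from outside.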
        os_conn.goodbye_security()
        flavor = [
            f for f in os_conn.nova.flavors.list() if f.name == 'm1.micro'
        ][0]
        image = os_conn.nova.images.list().pop()
        network = os_conn.nova.networks.find(label=net_name)
        access_point = os_conn.nova.servers.create(flavor=flavor,
                                                   name='test1',
                                                   image=image,
                                                   nics=[{
                                                       'net-id': network.id
                                                   }])
        wait(lambda: os_conn.get_instance_detail(access_point).status ==
             'ACTIVE',
             timeout=300)
        access_point_fip = os_conn.assign_floating_ip(access_point).ip
        wait(lambda: tcp_ping(access_point_fip, 22),
             timeout=120,
             interval=5,
             timeout_msg="Node {0} is not accessible by SSH.".format(
                 access_point_fip))

        instance = os_conn.nova.servers.create(flavor=flavor,
                                               name='test2',
                                               image=image,
                                               nics=[{
                                                   'net-id': network.id
                                               }])
        wait(lambda: os_conn.get_instance_detail(instance).status == 'ACTIVE',
             timeout=300)

        self.show_step(3)
        # Get private ip of instance
        logger.info('{}'.format(os_conn.nova.servers.ips(instance.id)))
        ip = os_conn.nova.servers.ips(instance.id)[net_name].pop()['addr']
        with SSH(access_point_fip) as remote:
            remote.check_connection_through_host({ip: ['8.8.8.8']})

        self.show_step(4)
        for srv in [access_point, instance]:
            os_conn.delete_instance(srv)
            wait(lambda: os_conn.is_srv_deleted(srv),
                 timeout=200,
                 timeout_msg="Instance was not deleted.")
    def contrail_ceilometer_metrics(self):
        """Check that ceilometer collects contrail metrics.

        Scenario:
            1. Setup systest_setup.
            2. Create 2 instances in the default network.
            3. Send ICMP packets from one instance to another.
            4. Check contrail ceilometer metrics:
                *ip.floating.receive.bytes
                *ip.floating.receive.packets
                *ip.floating.transmit.bytes
                *ip.floating.transmit.packets


        Duration 120 min

        """
        # constants
        ceilometer_metrics = [
            'ip.floating.receive.bytes', 'ip.floating.receive.packets',
            'ip.floating.transmit.bytes', 'ip.floating.transmit.packets'
        ]
        command = ("source openrc; ceilometer sample-list -m {0} "
                   "-q 'resource_id={1}'")
        metric_type = 'cumulative'
        time_to_update_metrics = 60 * 10
        message = "Ceilometer doesn't collect metric {0}."

        self.show_step(1)
        self.env.revert_snapshot('systest_setup')
        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        srv_1 = os_conn.create_server_for_migration(neutron=True,
                                                    label='admin_internal_net')
        fip_1 = os_conn.assign_floating_ip(srv_1)

        srv_2 = os_conn.create_server_for_migration(neutron=True,
                                                    label='admin_internal_net')
        fip_2 = os_conn.assign_floating_ip(srv_2)

        for fip in [fip_1.ip, fip_2.ip]:
            wait(lambda: tcp_ping(fip, 22),
                 timeout=60,
                 interval=5,
                 timeout_msg="Node {0} is not accessible by SSH.".format(fip))

        self.show_step(3)
        controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[1])
        self.ping_instance_from_instance(os_conn, controller.name,
                                         {fip_1.ip: [fip_2.ip]})

        self.show_step(4)
        with self.fuel_web.get_ssh_for_node("slave-02") as ssh:
            for metric in ceilometer_metrics:
                for resource_id in [fip_1.id, fip_2.id]:
                    wait(lambda: len(
                        list(
                            ssh.execute(command.format(metric, resource_id))[
                                'stdout'])) > 4,
                         timeout=time_to_update_metrics,
                         timeout_msg=message.format(metric))
                    m = list(
                        ssh.execute(command.format(metric,
                                                   resource_id))['stdout'])
                    # Check type of metrics
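                    # sample-list prints an ASCII table: row index 3 is the
                    # first data row and column index 5 holds the sample type
                    # (layout assumption for the default ceilometer CLI output).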
                    collect_metric_type = m[3].split(' ')[5]
                    assert_true(
                        collect_metric_type == metric_type,
                        "Type of metric {0} is not equal to {1}.".format(
                            collect_metric_type, metric_type))
    def contrail_vm_connection_in_different_tenants(self):
        """Check VM connectivity between instances in different tenants.

        Scenario:
            1. Setup systest_setup.
            2. Create 1 new tenant (project).
            3. Create a network in each tenant.
            4. Launch 2 new instances in different tenants (projects).
            5. Check ping connectivity between instances.
            6. Verify on Contrail controller WebUI that networks are there and
               VMs are attached to different networks.

        Duration: 15 min

        """
        # constants
        net_admin = 'net_1'
        net_test = 'net_2'
        cidr = '192.168.115.0'
        self.show_step(1)
        self.env.revert_snapshot('systest_setup')
        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        contrail_client = ContrailClient(os_ip)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        test_tenant = 'test'
        os_conn.create_user_and_tenant(test_tenant, test_tenant, test_tenant)
        self.add_role_to_user(os_conn, 'test', 'admin', 'test')

        test = os_actions.OpenStackActions(os_ip, test_tenant, test_tenant,
                                           test_tenant)
        test_contrail = ContrailClient(os_ip,
                                       credentials={
                                           'username': test_tenant,
                                           'tenant_name': test_tenant,
                                           'password': test_tenant
                                       })
        tenant = test.get_tenant(test_tenant)
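        # Give every existing user the admin role in the new tenant so both
        # OpenStack clients can operate on it.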
        for user in os_conn.keystone.users.list():
            if user.name != test_tenant:
                tenant.add_user(user, self.get_role(test, 'admin'))

        self.show_step(3)
        logger.info('Create network {0} in tenant {1}'.format(
            net_test, tenant.name))
        network_test = test.create_network(network_name=net_test,
                                           tenant_id=tenant.id)['network']

        subnet_test = test.create_subnet(subnet_name=net_test,
                                         network_id=network_test['id'],
                                         cidr=cidr + '/24')

        router = test.create_router('router_1', tenant=tenant)
        test.add_router_interface(router_id=router["id"],
                                  subnet_id=subnet_test["id"])

        network = contrail_client.create_network(
            ["default-domain", SERVTEST_TENANT, net_admin], [{
                "attr": {
                    "ipam_subnets": [{
                        "subnet": {
                            "ip_prefix": '10.1.1.0',
                            "ip_prefix_len": 24
                        }
                    }]
                },
                "to":
                ["default-domain", "default-project", "default-network-ipam"]
            }])['virtual-network']
        default_router = contrail_client.get_router_by_name(SERVTEST_TENANT)
        contrail_client.add_router_interface(network, default_router)

        self.show_step(4)
        srv_1 = os_conn.create_server_for_migration(neutron=True,
                                                    label=net_admin)
        fip_1 = os_conn.assign_floating_ip(srv_1).ip
        wait(lambda: tcp_ping(fip_1, 22),
             timeout=60,
             interval=5,
             timeout_msg="Node {0} is not accessible by SSH.".format(fip_1))
        ip_1 = os_conn.get_nova_instance_ip(srv_1, net_name=net_admin)
        srv_2 = test.create_server_for_migration(neutron=True, label=net_test)
        srv_3 = test.create_server_for_migration(neutron=True, label=net_test)
        ips = []
        for srv in [srv_2, srv_3]:
            ip = (test.get_nova_instance_ip(srv, net_name=net_test))
            if ip != ip_1:
                ips.append(ip)

        self.show_step(5)
        ip_pair = {}
        ip_pair[fip_1] = ips

        controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[1])
        self.ping_instance_from_instance(os_conn,
                                         controller.name,
                                         ip_pair,
                                         ping_result=1)

        self.show_step(6)
        logger.info('{}'.format(network))
        net_info = contrail_client.get_net_by_id(
            network['uuid'])['virtual-network']
        logger.info('{}'.format(net_info))
        net_interface_ids = []
        for interface in net_info['virtual_machine_interface_back_refs']:
            net_interface_ids.append(interface['uuid'])
        interf_id = contrail_client.get_instance_by_id(
            srv_1.id
        )['virtual-machine']['virtual_machine_interface_back_refs'][0]['uuid']
        assert_true(
            interf_id in net_interface_ids,
            '{0} is not attached to network {1}'.format(srv_1.name, net_admin))
        net_info = test_contrail.get_net_by_id(
            network_test['id'])['virtual-network']
        net_interface_ids = []
        for interface in net_info['virtual_machine_interface_back_refs']:
            net_interface_ids.append(interface['uuid'])
        interf_id = test_contrail.get_instance_by_id(
            srv_2.id
        )['virtual-machine']['virtual_machine_interface_back_refs'][0]['uuid']
        assert_true(
            interf_id in net_interface_ids,
            '{0} is not attached to network {1}'.format(srv_2.name, net_test))
    def create_new_network_via_contrail(self):
        """Create a new network via Contrail.

        Scenario:
            1. Setup systest_setup.
            2. Create a new network via Contrail API.
            3. Launch 2 new instances in the network with the default security
               group via Horizon API.
            4. Check ping connectivity between instances.
            5. Verify on Contrail controller WebUI that network is there and
               VMs are attached to it.

        Duration: 15 min

        """
        # constants
        net_name = 'net_1'
        self.show_step(1)
        self.env.revert_snapshot('systest_setup')
        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        contrail_client = ContrailClient(os_ip)
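        # The network is created through the Contrail REST API rather than
        # Neutron: the fq_name is [domain, project, network] and the
        # ipam_subnets entry defines a 10.1.1.0/24 subnet under the default
        # network IPAM.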
        network = contrail_client.create_network(
            ["default-domain", SERVTEST_TENANT, net_name], [{
                "attr": {
                    "ipam_subnets": [{
                        "subnet": {
                            "ip_prefix": '10.1.1.0',
                            "ip_prefix_len": 24
                        }
                    }]
                },
                "to":
                ["default-domain", "default-project", "default-network-ipam"]
            }])['virtual-network']
        default_router = contrail_client.get_router_by_name(SERVTEST_TENANT)
        contrail_client.add_router_interface(network, default_router)

        self.show_step(3)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        hypervisors = os_conn.get_hypervisors()
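        # Boot one instance on every hypervisor so the ping check below
        # crosses compute nodes.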
        instances = []
        fips = []
        for hypervisor in hypervisors:
            instance = os_conn.create_server_for_migration(
                neutron=True,
                availability_zone="nova:{0}".format(
                    hypervisor.hypervisor_hostname),
                label=net_name)
            instances.append(instance)
            ip = os_conn.assign_floating_ip(instance).ip
            wait(lambda: tcp_ping(ip, 22),
                 timeout=60,
                 interval=5,
                 timeout_msg="Node {0} is not accessible by SSH.".format(ip))
            fips.append(ip)

        self.show_step(4)
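        # Build a full-mesh ping matrix: every floating IP should reach all
        # the others.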
        ip_pair = dict.fromkeys(fips)
        for key in ip_pair:
            ip_pair[key] = [value for value in fips if key != value]
        controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[1])
        self.ping_instance_from_instance(os_conn, controller.name, ip_pair)

        self.show_step(5)
        net_info = contrail_client.get_net_by_id(
            network['uuid'])['virtual-network']
        net_interface_ids = []
        for interface in net_info['virtual_machine_interface_back_refs']:
            net_interface_ids.append(interface['uuid'])
        for instance in instances:
            interf_id = contrail_client.get_instance_by_id(
                instance.id)['virtual-machine'][
                    'virtual_machine_interface_back_refs'][0]['uuid']
            assert_true(
                interf_id in net_interface_ids,
                '{0} is not attached to network {1}'.format(
                    instance.name, net_name))