Example #1
def node_rebooted(ip, interval=5, timeout=60 * 15):
    wait(lambda: not icmp_ping(ip), interval=interval, timeout=timeout,
         timeout_msg=("Node with ip: {} has not become offline after "
                      "starting reboot").format(ip))
    wait(lambda: icmp_ping(ip), interval=interval, timeout=timeout,
         timeout_msg="Node with ip: {} has not become online "
                     "after reboot".format(ip))
Example #2
def node_rebooted(ip, interval=5, timeout=60 * 15):
    wait(
        lambda: not icmp_ping(ip),
        interval=interval,
        timeout=timeout,
        timeout_msg="Node with ip: {} has not become offline after "
                    "starting reboot".format(ip),
    )
    wait(
        lambda: icmp_ping(ip),
        interval=interval,
        timeout=timeout,
        timeout_msg="Node with ip: {} has not become online "
                    "after reboot".format(ip),
    )
Example #3
    def check_migration_status(self):
        """Check periodically the status of Fuel Master migration process"""

        with self.env.d_env.get_admin_remote() as remote:
            checkers.wait_phrase_in_log(
                remote,
                60 * 60,
                interval=0.2,
                phrase='Rebooting to begin the data sync process',
                log_path='/var/log/fuel-migrate.log')
        logger.info(
            'Rebooting to begin the data sync process for fuel migrate')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become offline '
             'after starting reboot')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become online '
             'after rebooting')
        self.env.d_env.nodes().admin.await(
            network_name=self.env.d_env.admin_net, timeout=60 * 15)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.wait_phrase_in_log(
                remote,
                60 * 90,
                interval=0.1,
                phrase='Stop network and up with new settings',
                log_path='/var/log/fuel-migrate.log')
        logger.info('Shutting down network')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             interval=0.1,
             timeout_msg='Master node has not become offline on '
             'shutting network down')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become online after '
             'shutting network down')

        self.env.d_env.nodes().admin.await(
            network_name=self.env.d_env.admin_net, timeout=60 * 10)

        with self.env.d_env.get_admin_remote() as remote:
            wait(lambda: not remote.exists("/notready"),
                 timeout=900,
                 timeout_msg=("File wasn't removed in 900 sec"))
Example #4
    def check_migration_status(self):
        """Check periodically the status of Fuel Master migration process"""

        logger.info('First reboot of Master node...')

        logger.info('Wait for Master node to become offline')
        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 10,
             timeout_msg='Master node did not become offline')

        logger.info('Wait for echo from Master node')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 10,
             timeout_msg='Master node did not respond after reboot')

        logger.info('Wait for Master node to become online')
        self.env.d_env.nodes().admin.await(
            network_name=self.env.d_env.admin_net,
            timeout=60 * 10)

        logger.info('Second reboot of Master node...')

        logger.info('Wait for Master node to become offline')
        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 40,
             timeout_msg='Master node did not become offline')

        logger.info('Wait for echo from Master node')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 10,
             timeout_msg='Master node did not respond after reboot')

        logger.info('Wait for Master node to become online')
        self.env.d_env.nodes().admin.await(
            network_name=self.env.d_env.admin_net,
            timeout=60 * 10)

        logger.info("Wait for file 'migration-done' appears")
        with self.env.d_env.get_admin_remote() as remote:
            wait(lambda: remote.exists("/tmp/migration-done"),
                 timeout=60 * 10,
                 timeout_msg="File /tmp/migration-done wasn't appeared")
            logger.info("Migration complete!")

        logger.info("Wait for Slave nodes become online")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:2], timeout=60 * 20)
Example #5
    def check_migration_status(self):
        """Check periodically the status of Fuel Master migration process"""

        checkers.wait_phrase_in_log(
            self.env.get_admin_node_ip(), 60 * 60, interval=0.2,
            phrase='Rebooting to begin the data sync process',
            log_path='/var/log/fuel-migrate.log')
        logger.info(
            'Rebooting to begin the data sync process for fuel migrate')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15, timeout_msg='Master node has not become offline '
                                          'after starting reboot')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15, timeout_msg='Master node has not become online '
                                          'after rebooting')
        self.env.d_env.nodes().admin.await(
            network_name=self.env.d_env.admin_net,
            timeout=60 * 15)

        checkers.wait_phrase_in_log(
            self.env.get_admin_node_ip(), 60 * 90, interval=0.1,
            phrase='Stop network and up with new settings',
            log_path='/var/log/fuel-migrate.log')
        logger.info('Shutting down network')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15, interval=0.1,
             timeout_msg='Master node has not become offline on '
                         'shutting network down')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become online after '
                         'shutting network down')

        self.env.d_env.nodes().admin.await(
            network_name=self.env.d_env.admin_net,
            timeout=60 * 10)

        with self.env.d_env.get_admin_remote() as remote:
            wait(lambda: not remote.exists("/notready"),
                 timeout=900,
                 timeout_msg="File wasn't removed in 900 sec")

        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:2])
Example #6
    def test_icmp_ping(self, caller):
        host = '127.0.0.1'
        timeout = 1
        result = helpers.icmp_ping(host=host)
        caller.assert_called_once_with(
            "ping -c 1 -W '{timeout:d}' '{host:s}'".format(host=host,
                                                           timeout=timeout))
        self.assertTrue(result, 'Unexpected result of validation')
Example #7
    def check_migration_status(self):
        """Check periodically the status of Fuel Master migration process"""

        logger.info('First reboot of Master node...')

        logger.info('Wait for Master node to become offline')
        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 10,
             timeout_msg='Master node did not become offline')

        logger.info('Wait for echo from Master node')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 10,
             timeout_msg='Master node did not respond after reboot')

        logger.info('Wait for Master node to become online')
        self.env.d_env.nodes().admin.await(network_name='admin',
                                           timeout=60 * 10)

        logger.info('Second reboot of Master node...')

        logger.info('Wait for Master node to become offline')
        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 40,
             timeout_msg='Master node did not become offline')

        logger.info('Wait for echo from Master node')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 10,
             timeout_msg='Master node did not respond after reboot')

        logger.info('Wait for Master node to become online')
        self.env.d_env.nodes().admin.await(network_name='admin',
                                           timeout=60 * 10)

        logger.info("Wait for file 'migration-done' appears")
        with self.env.d_env.get_admin_remote() as remote:
            wait(lambda: remote.exists("/tmp/migration-done"),
                 timeout=60 * 10,
                 timeout_msg="File /tmp/migration-done wasn't appeared")
            logger.info("Migration complete!")

        logger.info("Wait for Slave nodes become online")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:2], timeout=60 * 20)
Example #8
def create_and_assign_floating_ips(os_conn, instance):
    """Create Vms on available hypervisors.

    :param os_conn: type object, openstack
    :param instance: type string, name of  instance
    """
    ip = os_conn.assign_floating_ip(instance).ip
    wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
    return ip
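A hypothetical call site for the helper above (os_conn and instance would come from the surrounding test fixture; the names are illustrative). Because wait() blocks until the new address answers ping, a returned ip is known to be reachable:

ip = create_and_assign_floating_ips(os_conn, instance)
print('Instance reachable at {}'.format(ip))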
Example #9
    def test_icmp_ping(self, caller):
        host = '127.0.0.1'
        timeout = 1
        result = helpers.icmp_ping(host=host)
        caller.assert_called_once_with(
            "ping -c 1 -W '{timeout:d}' '{host:s}'".format(
                host=host, timeout=timeout
            ))
        self.assertTrue(result, 'Unexpected result of validation')
Example #10
    def test_icmp_ping(self, caller):
        caller.return_value = 0
        host = '127.0.0.1'
        timeout = 1
        result = helpers.icmp_ping(host=host)
        caller.assert_called_once_with(
            "ping -c 1 -W '%(timeout)d' '%(host)s' 1>/dev/null 2>&1" % {
                'host': host,
                'timeout': timeout
            })
        self.assertTrue(result, 'Unexpected result of validation')
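The assertions in Examples #6, #9 and #10 pin down the contract of icmp_ping: it shells out to ping with one echo request ('-c 1') and a wait limit ('-W <timeout>'), and reports success when the command exits with status 0. Below is a minimal sketch consistent with Example #10, assuming an os.system-style call; the real helper may wrap the command differently:

import os


def icmp_ping(host, timeout=1):
    # Send one ICMP echo request and wait up to `timeout` seconds for the
    # reply; output is discarded, only the exit status matters.
    return os.system(
        "ping -c 1 -W '%(timeout)d' '%(host)s' 1>/dev/null 2>&1" % {
            'host': host,
            'timeout': timeout,
        }) == 0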
Example #11
def create_and_assign_floating_ips(os_conn, instances_list):
    """Create Vms on available hypervisors.

    :param os_conn: type object, openstack
    :param instances_list: type list, instances
    """
    fips = []
    for instance in instances_list:
        ip = os_conn.assign_floating_ip(instance).ip
        fips.append(ip)
        wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
    return fips
Example #12
def create_and_assign_floating_ips(os_conn, instances_list):
    """Associate floating ips with specified instances.

    :param os_conn: type object, openstack
    :param instances_list: type list, instances
    """
    fips = []
    for instance in instances_list:
        ip = os_conn.assign_floating_ip(instance).ip
        fips.append(ip)
        wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
    return fips
Example #13
def create_and_assign_floating_ips(os_conn, instances_list):
    """Associate floating ips with specified instances.

    :param os_conn: type object, openstack
    :param instances_list: type list, instances
    """
    fips = []
    for instance in instances_list:
        ip = os_conn.assign_floating_ip(instance).ip
        fips.append(ip)
        wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
    return fips
Example #14
def create_and_assign_floating_ips(os_conn, instances_list):
    """Associate floating ips with specified instances.

    :param os_conn: type object, openstack
    :param instances_list: type list, instances
    """
    fips = []
    for instance in instances_list:
        ip = os_conn.assign_floating_ip(instance).ip
        fips.append(ip)
        wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
    return fips
Example #15
    def fuel_migration(self):
        """Fuel master migration to VM

        Scenario:

            1. Create cluster
            2. Run OSTF tests
            3. Run Network check
            4. Migrate fuel-master to VM
            5. Run OSTF tests
            6. Run Network check
            7. Check statuses for master services

        Duration 210m
        """
        self.env.revert_snapshot("ready_with_3_slaves")
        data = {
            'net_provider': 'neutron',
            'net_segment_type': settings.NEUTRON_SEGMENT_TYPE
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings=data)

        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        })

        # Check network
        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Check network
        self.fuel_web.verify_network(cluster_id)

        # Fuel migration
        remote = self.env.d_env.get_admin_remote()
        logger.info('Fuel migration on compute slave-02')

        result = remote.execute(
            'fuel-migrate ' +
            self.fuel_web.get_nailgun_node_by_name('slave-02')['ip'] +
            ' >/dev/null &')
        assert_equal(
            result['exit_code'], 0,
            'Failed to execute "{0}" on remote host: {1}'.format(
                'fuel-migrate ' + self.env.d_env.nodes().slaves[0].name,
                result))
        checkers.wait_phrase_in_log(remote,
                                    60 * 60,
                                    interval=0.2,
                                    phrase='Rebooting to begin '
                                    'the data sync process',
                                    log_path='/var/log/fuel-migrate.log')
        remote.clear()
        logger.info('Rebooting to begin the data sync process for fuel '
                    'migrate')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become offline '
             'after rebooting')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become online '
             'after rebooting')
        self.env.d_env.nodes().admin.await(network_name=self.d_env.admin_net,
                                           timeout=60 * 15)
        with self.env.d_env.get_admin_remote() as remote:
            checkers.wait_phrase_in_log(remote,
                                        60 * 90,
                                        interval=0.1,
                                        phrase='Stop network and up with '
                                        'new settings',
                                        log_path='/var/log/fuel-migrate.log')
        logger.info('Shutting down network')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             interval=0.1,
             timeout_msg='Master node has not become offline on '
                         'shutting network down')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become online after '
                         'shutting network down')

        self.env.d_env.nodes().admin.await(network_name=self.d_env.admin_net,
                                           timeout=60 * 10)

        logger.info("Check containers")
        self.env.docker_actions.wait_for_ready_containers(timeout=60 * 30)

        logger.info("Check services")
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.assert_os_services_ready(cluster_id)

        # Check network
        self.fuel_web.verify_network(cluster_id)

        # Run ostf
        _wait(lambda: self.fuel_web.run_ostf(cluster_id,
                                             test_sets=['smoke', 'sanity']),
              timeout=1500)
        logger.debug("OSTF tests are pass now")
Example #16
    def extended_tests_reset_vcenter(self, openstack_ip):
        """Common verification of dvs_reboot_vcenter* test cases.

        :param openstack_ip: type string, openstack ip
        """
        os_conn = os_actions.OpenStackActions(openstack_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        # Create security group with rules for ssh and ping
        security_group = os_conn.create_sec_group_for_ssh()

        _sec_groups = os_conn.neutron.list_security_groups()['security_groups']
        _serv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id
        default_sg = [
            sg for sg in _sec_groups
            if sg['tenant_id'] == _serv_tenant_id and sg['name'] == 'default'
        ][0]

        network = os_conn.nova.networks.find(label=self.inter_net_name)

        # Create access point server
        _, access_point_ip = openstack.create_access_point(
            os_conn=os_conn,
            nics=[{
                'net-id': network.id
            }],
            security_groups=[security_group.name, default_sg['name']])

        self.show_step(11)
        self.show_step(12)
        instances = openstack.create_instances(
            os_conn=os_conn,
            nics=[{
                'net-id': network.id
            }],
            vm_count=1,
            security_groups=[default_sg['name']])
        openstack.verify_instance_state(os_conn)

        # Get private ips of instances
        ips = [
            os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)
            for i in instances
        ]
        time.sleep(30)
        self.show_step(13)
        openstack.ping_each_other(ips=ips, access_point_ip=access_point_ip)

        self.show_step(14)
        vcenter_name = [
            name for name in self.WORKSTATION_NODES if 'vcenter' in name
        ].pop()
        node = vmrun.Vmrun(self.host_type,
                           self.path_to_vmx_file.format(vcenter_name),
                           host_name=self.host_name,
                           username=self.WORKSTATION_USERNAME,
                           password=self.WORKSTATION_PASSWORD)
        node.reset()

        self.show_step(15)
        wait(lambda: not icmp_ping(self.VCENTER_IP),
             interval=1,
             timeout=10,
             timeout_msg='vCenter is still available.')

        self.show_step(16)
        wait(lambda: icmp_ping(self.VCENTER_IP),
             interval=5,
             timeout=120,
             timeout_msg='vCenter is not available.')

        self.show_step(17)
        openstack.ping_each_other(ips=ips, access_point_ip=access_point_ip)
Example #17
    def fuel_migration(self):
        """Fuel master migration to VM

        Scenario:

            1. Create cluster
            2. Run OSTF tests
            3. Run Network check
            4. Migrate fuel-master to VM
            5. Run OSTF tests
            6. Run Network check
            7. Check statuses for master services

        Duration 210m
        """
        self.env.revert_snapshot("ready_with_3_slaves")
        data = {
            'net_provider': 'neutron',
            'net_segment_type': settings.NEUTRON_SEGMENT_TYPE
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings=data)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute']
            }
        )

        # Check network
        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Check network
        self.fuel_web.verify_network(cluster_id)

        # Fuel migration
        remote = self.env.d_env.get_admin_remote()
        logger.info('Fuel migration on compute slave-02')

        result = remote.execute(
            'fuel-migrate ' +
            self.fuel_web.get_nailgun_node_by_name('slave-02')['ip'] +
            ' >/dev/null &')
        assert_equal(
            result['exit_code'], 0,
            'Failed to execute "{0}" on remote host: {1}'.format(
                'fuel-migrate ' + self.env.d_env.nodes().slaves[0].name,
                result))
        checkers.wait_phrase_in_log(remote, 60 * 60, interval=0.2,
                                    phrase='Rebooting to begin '
                                           'the data sync process',
                                    log_path='/var/log/fuel-migrate.log')
        remote.clear()
        logger.info('Rebooting to begin the data sync process for fuel '
                    'migrate')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15, timeout_msg='Master node has not become offline '
                                          'after rebooting')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15, timeout_msg='Master node has not become online '
                                          'after rebooting')
        self.env.d_env.nodes().admin.await(network_name=self.d_env.admin_net,
                                           timeout=60 * 15)
        with self.env.d_env.get_admin_remote() as remote:
            checkers.wait_phrase_in_log(remote,
                                        60 * 90, interval=0.1,
                                        phrase='Stop network and up with '
                                               'new settings',
                                        log_path='/var/log/fuel-migrate.log')
        logger.info('Shutting down network')

        wait(lambda: not icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15, interval=0.1,
             timeout_msg='Master node has not become offline on '
                         'shutting network down')
        wait(lambda: icmp_ping(self.env.get_admin_node_ip()),
             timeout=60 * 15,
             timeout_msg='Master node has not become online after '
                         'shutting network down')

        self.env.d_env.nodes().admin.await(network_name=self.d_env.admin_net,
                                           timeout=60 * 10)

        logger.info("Check containers")
        self.env.docker_actions.wait_for_ready_containers(timeout=60 * 30)

        logger.info("Check services")
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.assert_os_services_ready(cluster_id)

        # Check network
        self.fuel_web.verify_network(cluster_id)

        # Run ostf
        _wait(lambda:
              self.fuel_web.run_ostf(cluster_id,
                                     test_sets=['smoke', 'sanity']),
              timeout=1500)
        logger.debug("OSTF tests are pass now")
Example #18
    def extended_tests_reset_vcenter(self, openstack_ip):
        """Common verification of dvs_reboot_vcenter* test cases.

        :param openstack_ip: type string, openstack ip
        """
        os_conn = os_actions.OpenStackActions(
            openstack_ip, SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)

        # Create security group with rules for ssh and ping
        security_group = os_conn.create_sec_group_for_ssh()

        _sec_groups = os_conn.neutron.list_security_groups()['security_groups']
        _serv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id
        default_sg = [sg for sg in _sec_groups
                      if sg['tenant_id'] == _serv_tenant_id and
                      sg['name'] == 'default'][0]

        network = os_conn.nova.networks.find(label=self.inter_net_name)

        # Create access point server
        _, access_point_ip = openstack.create_access_point(
            os_conn=os_conn,
            nics=[{'net-id': network.id}],
            security_groups=[security_group.name, default_sg['name']])

        self.show_step(11)
        self.show_step(12)
        instances = openstack.create_instances(
            os_conn=os_conn,
            nics=[{'net-id': network.id}],
            vm_count=1,
            security_groups=[default_sg['name']])
        openstack.verify_instance_state(os_conn)

        # Get private ips of instances
        ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)
               for i in instances]
        time.sleep(30)
        self.show_step(13)
        openstack.ping_each_other(ips=ips, access_point_ip=access_point_ip)

        self.show_step(14)
        vcenter_name = [name for name in self.WORKSTATION_NODES
                        if 'vcenter' in name].pop()
        node = vmrun.Vmrun(
            self.host_type,
            self.path_to_vmx_file.format(vcenter_name),
            host_name=self.host_name,
            username=self.WORKSTATION_USERNAME,
            password=self.WORKSTATION_PASSWORD)
        node.reset()

        self.show_step(15)
        wait(lambda: not icmp_ping(self.VCENTER_IP),
             interval=1,
             timeout=10,
             timeout_msg='vCenter is still available.')

        self.show_step(16)
        wait(lambda: icmp_ping(self.VCENTER_IP),
             interval=5,
             timeout=120,
             timeout_msg='vCenter is not available.')

        self.show_step(17)
        openstack.ping_each_other(ips=ips, access_point_ip=access_point_ip)
Example #19
    def extended_tests_reset_vcenter(self, openstack_ip):
        """Common verification of dvs_reboot_vcenter* test cases.

        :param openstack_ip: type string, openstack ip
        """
        admin = os_actions.OpenStackActions(openstack_ip, SERVTEST_USERNAME,
                                            SERVTEST_PASSWORD, SERVTEST_TENANT)

        # create security group with rules for ssh and ping
        security_group = admin.create_sec_group_for_ssh()

        default_sg = [
            sg
            for sg in admin.neutron.list_security_groups()['security_groups']
            if sg['tenant_id'] == admin.get_tenant(SERVTEST_TENANT).id
            if sg['name'] == 'default'
        ][0]

        network = admin.nova.networks.find(label=self.inter_net_name)

        # create access point server
        access_point, access_point_ip = openstack.create_access_point(
            os_conn=admin,
            nics=[{
                'net-id': network.id
            }],
            security_groups=[security_group.name, default_sg['name']])

        self.show_step(13)
        self.show_step(14)
        instances = openstack.create_instances(
            os_conn=admin,
            nics=[{
                'net-id': network.id
            }],
            vm_count=1,
            security_groups=[default_sg['name']])
        openstack.verify_instance_state(admin)

        # Get private ips of instances
        ips = []
        for instance in instances:
            ips.append(
                admin.get_nova_instance_ip(instance,
                                           net_name=self.inter_net_name))
        time.sleep(30)
        self.show_step(15)
        for ip in ips:
            ping_result = openstack.remote_execute_command(
                access_point_ip, ip, "ping -c 5 {}".format(ip))
            assert_true(ping_result['exit_code'] == 0,
                        "Ping isn't available from {0} to {1}".format(
                            access_point_ip, ip))

        self.show_step(16)
        vcenter_name = [
            name for name in self.WORKSTATION_NODES if 'vcenter' in name
        ].pop()
        node = vmrun.Vmrun(self.host_type,
                           self.path_to_vmx_file.format(vcenter_name),
                           host_name=self.host_name,
                           username=self.WORKSTATION_USERNAME,
                           password=self.WORKSTATION_PASSWORD)
        node.reset()

        self.show_step(17)
        wait(lambda: not icmp_ping(self.VCENTER_IP),
             interval=1,
             timeout=10,
             timeout_msg='vCenter is still available.')

        self.show_step(18)
        wait(lambda: icmp_ping(self.VCENTER_IP),
             interval=5,
             timeout=120,
             timeout_msg='vCenter is not available.')

        self.show_step(20)
        for ip in ips:
            ping_result = openstack.remote_execute_command(
                access_point_ip, ip, "ping -c 5 {}".format(ip))
            assert_true(ping_result['exit_code'] == 0,
                        "Ping isn't available from {0} to {1}".format(
                            access_point_ip, ip))
Example #20
    def extended_tests_reset_vcenter(self, openstack_ip):
        """Common verification of dvs_reboot_vcenter* test cases.

        :param openstack_ip: type string, openstack ip
        """
        admin = os_actions.OpenStackActions(
            openstack_ip, SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)

        # create security group with rules for ssh and ping
        security_group = admin.create_sec_group_for_ssh()

        default_sg = [
            sg
            for sg in admin.neutron.list_security_groups()['security_groups']
            if sg['tenant_id'] == admin.get_tenant(SERVTEST_TENANT).id
            if sg['name'] == 'default'][0]

        network = admin.nova.networks.find(label=self.inter_net_name)

        # create access point server
        access_point, access_point_ip = openstack.create_access_point(
            os_conn=admin, nics=[{'net-id': network.id}],
            security_groups=[security_group.name, default_sg['name']])

        self.show_step(13)
        self.show_step(14)
        instances = openstack.create_instances(
            os_conn=admin, nics=[{'net-id': network.id}],
            vm_count=1,
            security_groups=[default_sg['name']])
        openstack.verify_instance_state(admin)

        # Get private ips of instances
        ips = []
        for instance in instances:
            ips.append(admin.get_nova_instance_ip(
                instance, net_name=self.inter_net_name))
        time.sleep(30)
        self.show_step(15)
        for ip in ips:
            ping_result = openstack.remote_execute_command(
                access_point_ip, ip, "ping -c 5 {}".format(ip))
            assert_true(
                ping_result['exit_code'] == 0,
                "Ping isn't available from {0} to {1}".format(
                    access_point_ip, ip)
            )

        self.show_step(16)
        vcenter_name = [
            name for name in self.WORKSTATION_NODES if 'vcenter' in name].pop()
        node = vmrun.Vmrun(
            self.host_type,
            self.path_to_vmx_file.format(vcenter_name),
            host_name=self.host_name,
            username=self.WORKSTATION_USERNAME,
            password=self.WORKSTATION_PASSWORD)
        node.reset()

        self.show_step(17)
        wait(lambda: not icmp_ping(self.VCENTER_IP),
             interval=1, timeout=10,
             timeout_msg='vCenter is still available.')

        self.show_step(18)
        wait(lambda: icmp_ping(self.VCENTER_IP),
             interval=5, timeout=120,
             timeout_msg='vCenter is not available.')

        self.show_step(20)
        for ip in ips:
            ping_result = openstack.remote_execute_command(
                access_point_ip, ip, "ping -c 5 {}".format(ip))
            assert_true(
                ping_result['exit_code'] == 0,
                "Ping isn't available from {0} to {1}".format(
                    access_point_ip, ip)
            )