def reconfigure_ml2_vlan_range(self):
        """Reconfigure neutron ml2 VLAN range

        Scenario:
            1. Revert snapshot "basic_env_for_reconfiguration"
            2. Upload a new openstack configuration
            3. Get uptime of process "neutron-server" on each controller
            4. Apply a new VLAN range(minimal range) to all nodes
            5. Wait for configuration applying
            6. Check that service "neutron-server" was restarted
            7. Verify ml2 plugin settings
            8. Create new private network
            9. Try to create one more, verify that it is impossible

        Snapshot: reconfigure_ml2_vlan_range

        """
        self.check_run('reconfigure_ml2_vlan_range')
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("basic_env_for_reconfiguration")

        cluster_id = self.fuel_web.get_last_created_cluster()
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        self.show_step(2)
        vlan_config = utils.get_config_template('neutron')
        expected_settings = get_structured_config_dict(vlan_config)
        self.fuel_web.client.upload_configuration(vlan_config, cluster_id)

        self.show_step(3)
        # Uptimes taken before the apply; a later drop proves a restart.
        neutron_service = 'neutron-server'
        uptimes_before = self.get_service_uptime(ctrl_nodes, neutron_service)

        self.show_step(4)
        task = self.fuel_web.client.apply_configuration(cluster_id)

        self.show_step(5)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(6)
        self.check_service_was_restarted(ctrl_nodes, uptimes_before,
                                         neutron_service)

        self.show_step(7)
        self.check_config_on_remote(ctrl_nodes, expected_settings)

        self.show_step(8)
        self.show_step(9)
        # check_ml2_vlan_range creates one network and verifies a second
        # one cannot be created (steps 8 and 9).
        openstack = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.check_ml2_vlan_range(openstack)

        self.env.make_snapshot("reconfigure_ml2_vlan_range", is_make=True)
    def reconfigure_keystone_to_use_ldap(self):
        """Reconfigure keystone to use LDAP

        Scenario:
            1. Revert snapshot "deploy_neutron_vlan_ha"
            2. Upload a new openstack configuration
            3. Try to apply a new keystone configuration
            4. Wait for failing of deployment task
            5. Check that reason of failing is impossibility of
               the connection to LDAP server

        Snapshot reconfigure_keystone_to_use_ldap

        """
        self.show_step(1)
        self.env.revert_snapshot("deploy_neutron_vlan_ha")

        cluster_id = self.fuel_web.get_last_created_cluster()
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        # The LDAP configuration is uploaded/applied to one controller only.
        ldap_cntrllr = controllers[0]

        self.show_step(2)
        config = utils.get_config_template('keystone_ldap')
        self.fuel_web.client.upload_configuration(
            config,
            cluster_id,
            node_id=ldap_cntrllr['id'])

        self.show_step(3)
        task = self.fuel_web.client.apply_configuration(
            cluster_id,
            node_id=ldap_cntrllr['id'])

        self.show_step(4)
        # The apply task is expected to fail (the LDAP server in the
        # template is unreachable); success means the configuration was
        # incorrectly accepted.
        try:
            self.fuel_web.assert_task_success(task, timeout=1800, interval=30)
        except AssertionError:
            pass
        else:
            raise Exception("New configuration was not applied")

        self.show_step(5)
        # Confirm the failure reason: puppet must have logged the LDAP
        # connection error on the reconfigured controller.  The grep
        # helper raises if no match is found.
        with self.env.d_env.get_ssh_to_remote(ldap_cntrllr['ip']) as remote:
            log_path = '/var/log/puppet.log'
            cmd = "grep \"Can't contact LDAP server\" {0}".format(log_path)
            utils.run_on_remote_get_results(remote, cmd)

        self.env.make_snapshot("reconfigure_keystone_to_use_ldap",
                               is_make=True)
    def reconfigure_keystone_to_use_ldap(self):
        """Reconfigure keystone to use LDAP

        Scenario:
            1. Revert snapshot "basic_env_for_reconfiguration"
            2. Upload a new openstack configuration
            3. Try to apply a new keystone configuration
            4. Wait for failing of deployment task
            5. Check that reason of failing is impossibility of
               the connection to LDAP server

        Snapshot: reconfigure_keystone_to_use_ldap

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("basic_env_for_reconfiguration")

        cluster_id = self.fuel_web.get_last_created_cluster()
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        # LDAP settings are pushed to a single controller only.
        ldap_node = ctrl_nodes[0]

        self.show_step(2)
        ldap_config = utils.get_config_template('keystone_ldap')
        self.fuel_web.client.upload_configuration(
            ldap_config,
            cluster_id,
            node_id=ldap_node['id'])

        self.show_step(3)
        task = self.fuel_web.client.apply_configuration(
            cluster_id,
            node_id=ldap_node['id'])

        self.show_step(4)
        # The apply task must fail: the LDAP server is unreachable.
        task_failed = False
        try:
            self.fuel_web.assert_task_success(task, timeout=1800, interval=30)
        except AssertionError:
            task_failed = True
        if not task_failed:
            raise Exception("New configuration was not applied")

        self.show_step(5)
        # grep raises through the helper when the LDAP error is absent.
        with self.env.d_env.get_ssh_to_remote(ldap_node['ip']) as remote:
            log_path = '/var/log/puppet.log'
            cmd = "grep \"Can't contact LDAP server\" {0}".format(log_path)
            utils.run_on_remote_get_results(remote, cmd)

        self.env.make_snapshot("reconfigure_keystone_to_use_ldap")
    def reconfiguration_scalability(self):
        """Check scalability of configured environment

        Scenario:
            1. Revert snapshot "reconfigure_nova_ephemeral_disk"
            2. Upload a new openstack configuration for keystone
            3. Wait for configuration applying
            4. Verify keystone settings
            5. Keystone actions
            6. Add 1 compute and 1 controller to cluster
            7. Run network verification
            8. Deploy changes
            9. Run OSTF tests
            10. Verify keystone settings
            11. Verify nova settings
            12. Create flavor with ephemral disk
            13. Boot instance on updated compute with ephemral disk
            14. Assign floating ip
            15. Check ping to the instance
            16. SSH to VM and check ephemeral disk format
            17. Keystone actions

        Snapshot "reconfiguration_scalability"
        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("reconfigure_nova_ephemeral_disk")

        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        # The nova_disk template is not uploaded here — it was applied by
        # the reverted snapshot.  Its structured form is kept only to check
        # (step 11) that the newly added compute picks up those settings.
        config = utils.get_config_template('nova_disk')
        structured_config_nova = get_structured_config_dict(config)
        # NOTE: 'config' is rebound to the keystone template and is still
        # referenced at step 5 for the token expiration value.
        config = utils.get_config_template('keystone')
        structured_config_keystone = get_structured_config_dict(config)
        self.fuel_web.client.upload_configuration(config,
                                                  cluster_id,
                                                  role='controller')
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        self.show_step(3)
        task = self.fuel_web.client.apply_configuration(cluster_id,
                                                        role='controller')
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(4)
        self.check_config_on_remote(controllers, structured_config_keystone)

        self.show_step(5)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        time_expiration = config[
            'keystone_config']['token/expiration']['value']
        self.check_token_expiration(os_conn, time_expiration)

        self.show_step(6)
        bs_nodes = [x for x in self.env.d_env.get_nodes()
                    if x.name == 'slave-05' or x.name == 'slave-06']
        self.env.bootstrap_nodes(bs_nodes)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-05': ['compute']})
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-06': ['controller']})

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(10)
        # Re-read the node lists: the cluster now includes the freshly
        # deployed slave-05/slave-06, which are the nodes under test.
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])
        target_controller = [x for x in controllers
                             if 'slave-06' in x['name']]
        target_compute = [x for x in computes
                          if 'slave-05' in x['name']]
        self.check_config_on_remote(target_controller,
                                    structured_config_keystone)

        self.show_step(11)
        self.check_config_on_remote(target_compute, structured_config_nova)

        self.show_step(12)
        self.show_step(13)
        self.show_step(14)
        self.show_step(15)
        self.show_step(16)

        # check_nova_ephemeral_disk covers steps 12-16 against the new
        # compute (selected via its hypervisor FQDN).
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        hypervisor_name = target_compute[0]['fqdn']
        self.check_nova_ephemeral_disk(os_conn, cluster_id,
                                       hypervisor_name=hypervisor_name)

        self.show_step(17)
        self.check_token_expiration(os_conn, time_expiration)

        self.env.make_snapshot("reconfiguration_scalability", is_make=True)
    def preservation_config_after_reset_and_preconfigured_deploy(self):
        """Preservation config after reset of cluster and preconfigured deploy

        Scenario:
            1. Revert snapshot reconfigure_ml2_vlan_range
            2. Reset cluster
            3. Upload a new openstack configuration for nova
            4. Deploy changes
            5. Run OSTF
            6. Verify nova and neutron settings
            7. Create new private network
            8. Try to create one more, verify that it is impossible
            9. Boot instances with flavor that occupy all CPU
            10. Boot extra instance and catch the error

        Snapshot "preservation_config_after_reset_and_preconfigured_deploy"

        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("reconfigure_ml2_vlan_range")

        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.stop_reset_env_wait(cluster_id)

        self.show_step(3)
        config = utils.get_config_template('nova_cpu')
        structured_config_nova = get_structured_config_dict(config)
        self.fuel_web.client.upload_configuration(config,
                                                  cluster_id,
                                                  role='controller')
        # The neutron template is NOT re-uploaded: it was uploaded by the
        # reverted snapshot and is expected to survive the cluster reset.
        # Its structured form is only needed for verification at step 6.
        config = utils.get_config_template('neutron')
        structured_config_neutron = get_structured_config_dict(config)

        self.show_step(4)
        # Nodes go offline during the reset; wait for them to come back
        # before starting the redeployment.
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:4], timeout=10 * 60)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(5)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.show_step(6)
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        # Both the pre-reset (neutron) and newly uploaded (nova) settings
        # are expected on the controllers after redeploy.
        structured_config = {}
        structured_config.update(structured_config_neutron)
        structured_config.update(structured_config_nova)
        self.check_config_on_remote(controllers, structured_config)

        self.show_step(7)
        self.show_step(8)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.check_ml2_vlan_range(os_conn)

        self.show_step(9)
        self.show_step(10)
        self.check_overcommit_ratio(os_conn, cluster_id)

        snapshot = "preservation_config_after_reset_and_preconfigured_deploy"
        self.env.make_snapshot(snapshot, is_make=True)
    def reconfigure_nova_ephemeral_disk(self):
        """Reconfigure nova ephemeral disk format

        Scenario:
            1. Revert snapshot reconfigure_overcommit_ratio
            2. Delete previous OpenStack config
            3. Upload a new openstack configuration for nova on computes
            4. Apply configuration
            5. Wait for configuration applying
            6. Get uptime of process "nova-compute" on each compute
            7. Verify nova-compute settings
            8. Create flavor with ephemral disk,
            9. Boot instance on updated compute with ephemral disk,
            10. Assign floating ip,
            11. Check ping to the instance,
            12. SSH to VM and check ephemeral disk format

        Snapshot: reconfigure_nova_ephemeral_disk

        """
        self.check_run('reconfigure_nova_ephemeral_disk')
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("reconfigure_overcommit_ratio")

        cluster_id = self.fuel_web.get_last_created_cluster()
        compute_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])

        self.show_step(2)
        # Drop every configuration left over from previous tests.
        for old_config in self.fuel_web.client.list_configuration(
                cluster_id):
            self.fuel_web.client.delete_configuration(old_config["id"])

        self.show_step(3)
        disk_config = utils.get_config_template('nova_disk')
        expected_config = get_structured_config_dict(disk_config)
        self.fuel_web.client.upload_configuration(disk_config,
                                                  cluster_id,
                                                  role='compute')

        # Uptimes taken before the apply; a later drop proves a restart.
        monitored_service = "nova-compute"

        uptime_map = self.get_service_uptime(compute_nodes,
                                             monitored_service)

        self.show_step(4)
        task = self.fuel_web.client.apply_configuration(cluster_id,
                                                        role='compute')
        self.show_step(5)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(6)
        self.check_service_was_restarted(compute_nodes, uptime_map,
                                         monitored_service)

        self.show_step(7)
        self.check_config_on_remote(compute_nodes, expected_config)

        openstack = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        # check_nova_ephemeral_disk covers steps 8-12.
        for step in range(8, 13):
            self.show_step(step)
        self.check_nova_ephemeral_disk(openstack, cluster_id)

        self.env.make_snapshot("reconfigure_nova_ephemeral_disk",
                               is_make=True)
    def reconfigure_nova_quota(self):
        """Tests for reconfiguration nova quota.

        Scenario:
            1. Revert snapshot "basic_env_for_reconfiguration"
            2. Upload a new openstack configuration
            3. Get uptime of process "nova-api" on each controller
            4. Get uptime of process "nova-compute" on each compute
            5. Apply a new quota driver and quota_instances to all nodes
            6. Wait for configuration applying
            7. Verify uptime of process "nova-api" on each controller
            8. Verify uptime of process "nova-compute" on each compute
            9. Verify nova config settings
            10. Create new instance
            11. Try to create one more, verify that it is impossible

        Snapshot: reconfigure_nova_quota

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("basic_env_for_reconfiguration")

        cluster_id = self.fuel_web.get_last_created_cluster()
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        compute_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])

        self.show_step(2)
        quota_config = utils.get_config_template('nova_quota')
        expected_config = get_structured_config_dict(quota_config)
        self.fuel_web.client.upload_configuration(quota_config, cluster_id)

        # Uptimes taken before the apply; a later drop proves a restart.
        self.show_step(3)
        api_uptimes = self.get_service_uptime(ctrl_nodes, 'nova-api')

        self.show_step(4)
        compute_uptimes = self.get_service_uptime(compute_nodes,
                                                  'nova-compute')

        self.show_step(5)
        task = self.fuel_web.client.apply_configuration(cluster_id)

        self.show_step(6)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(7)
        self.check_service_was_restarted(ctrl_nodes, api_uptimes,
                                         'nova-api')

        self.show_step(8)
        self.check_service_was_restarted(compute_nodes, compute_uptimes,
                                         'nova-compute')

        self.show_step(9)
        self.check_config_on_remote(ctrl_nodes, expected_config)

        self.show_step(10)
        self.show_step(11)
        # check_nova_quota boots one instance and verifies the quota blocks
        # a second one (steps 10 and 11).
        openstack = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        self.check_nova_quota(openstack, cluster_id)

        self.env.make_snapshot("reconfigure_nova_quota")
    def reconfigure_overcommit_ratio(self):
        """Tests for reconfiguration nova CPU overcommit ratio.

        Scenario:
            1. Revert snapshot "basic_env_for_reconfiguration"
            2. Apply new CPU overcommit ratio for each controller
            3. Verify deployment task is finished
            4. Verify nova-scheduler services uptime
            5. Verify configuration file on each controller
            6. Boot instances with flavor that occupy all CPU,
               boot extra instance and catch the error
            7. Apply old CPU overcommit ratio for each controller
            8. Verify deployment task is finished
            9. Verify nova-scheduler services uptime
            10. Verify configuration file on each controller

        Snapshot: reconfigure_overcommit_ratio

        """
        self.check_run('reconfigure_overcommit_ratio')
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("basic_env_for_reconfiguration")

        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        config_new = utils.get_config_template('nova_cpu')
        structured_config = get_structured_config_dict(config_new)
        self.fuel_web.client.upload_configuration(config_new,
                                                  cluster_id,
                                                  role="controller")

        service_name = "nova-scheduler"

        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        # Uptimes taken before the apply; a later drop proves a restart.
        uptimes = self.get_service_uptime(controllers, service_name)
        task = self.fuel_web.client.apply_configuration(cluster_id,
                                                        role="controller")

        self.show_step(3)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(4)
        self.check_service_was_restarted(controllers, uptimes, service_name)

        self.show_step(5)
        self.check_config_on_remote(controllers, structured_config)

        self.show_step(6)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        self.check_overcommit_ratio(os_conn, cluster_id)

        self.show_step(7)
        # Roll back: upload and apply the previous overcommit ratio, then
        # repeat the restart/config verification cycle (steps 8-10).
        config_revert = utils.get_config_template('nova_cpu_old')
        structured_config_revert = get_structured_config_dict(config_revert)
        self.fuel_web.client.upload_configuration(config_revert,
                                                  cluster_id,
                                                  role="controller")
        uptimes = self.get_service_uptime(controllers, service_name)
        task = self.fuel_web.client.apply_configuration(cluster_id,
                                                        role="controller")
        self.show_step(8)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(9)
        self.check_service_was_restarted(controllers, uptimes, service_name)

        self.show_step(10)
        self.check_config_on_remote(controllers, structured_config_revert)

        self.env.make_snapshot("reconfigure_overcommit_ratio",
                               is_make=True)
    def reconfigure_ml2_vlan_range(self):
        """Reconfigure neutron ml2 VLAN range

        Scenario:
            1. Revert snapshot "deploy_neutron_vlan_ha"
            2. Upload a new openstack configuration
            3. Get uptime of process "neutron-server" on each controller
            4. Apply a new VLAN range(minimal range) to all nodes
            5. Wait for configuration applying
            6. Verify ml2 plugin settings
            7. Create new private network
            8. Try to create one more, verify that it is impossible

        Snapshot reconfigure_ml2_vlan_range

        """
        self.show_step(1)
        self.env.revert_snapshot("deploy_neutron_vlan_ha")

        cluster_id = self.fuel_web.get_last_created_cluster()
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        controllers = [x['ip'] for x in controllers]

        self.show_step(2)
        config = utils.get_config_template('neutron')
        structured_config = get_structured_config_dict(config)
        self.fuel_web.client.upload_configuration(config, cluster_id)

        self.show_step(3)
        # Record pre-apply uptimes so a service restart (uptime reset) can
        # be detected in step 6.
        uptimes = {}
        for controller in controllers:
            with self.env.d_env.get_ssh_to_remote(controller) as remote:
                uptimes[controller] = \
                    utils.get_process_uptime(remote, 'neutron-server')

        self.show_step(4)
        task = self.fuel_web.client.apply_configuration(cluster_id)

        self.show_step(5)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(6)
        for controller in controllers:
            with self.env.d_env.get_ssh_to_remote(controller) as remote:
                uptime = utils.get_process_uptime(remote, 'neutron-server')
                # A restarted service has a smaller uptime than before.
                # BUGFIX: the message said "neutron-servers" — no such
                # service name exists.
                asserts.assert_true(uptime <= uptimes[controller],
                                    'Service "neutron-server" was not '
                                    'restarted on {0}'.format(controller))
                for configpath, params in structured_config.items():
                    result = remote.open(configpath)
                    conf_for_check = utils.get_ini_config(result)
                    for param in params:
                        utils.check_config(conf_for_check,
                                           configpath,
                                           param['section'],
                                           param['option'],
                                           param['value'])

        self.show_step(7)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        tenant = os_conn.get_tenant('admin')
        os_conn.create_network('net1', tenant_id=tenant.id)

        self.show_step(8)
        # With the minimal VLAN range applied only one tenant network fits,
        # so the second create must fail with the exhaustion error.
        try:
            os_conn.create_network('net2', tenant_id=tenant.id)
        except Exception as e:
            # BUGFIX: use str(e) instead of e.message (deprecated in
            # Python 2.6+, removed in Python 3) and a bare `raise` to
            # preserve the original traceback.
            if 'No tenant network is available' not in str(e):
                raise
        else:
            raise Exception("New configuration was not applied")

        self.env.make_snapshot("reconfigure_ml2_vlan_range", is_make=True)
    def reconfigure_overcommit_ratio(self):
        """Tests for reconfiguration nova CPU overcommit ratio.

        Scenario:
            1. Create cluster
            2. Add 1 node with compute role
            3. Add 3 nodes with controller role
            4. Deploy the cluster
            5. Verify network
            6. Run OSTF
            7. Verify configuration file on each controller
            8. Apply new CPU overcommit ratio for each controller
            9. Verify deployment task is finished
            10. Verify nova-scheduler services uptime
            11. Boot instances with flavor that occupy all CPU
            12. Boot extra instance and catch the error

        Snapshot: reconfigure_overcommit_ratio

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT_TYPE,
            }
        )
        self.show_step(2)
        self.show_step(3)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['controller']
            })

        self.show_step(4)
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(7)
        # cluster_id from create_cluster above is still valid here; the
        # redundant re-fetch via get_last_created_cluster was removed.
        config = utils.get_config_template('nova_cpu')
        structured_config = get_structured_config_dict(config)
        self.fuel_web.client.upload_configuration(config, cluster_id)

        service_name = "nova-scheduler"

        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        controllers = [x['ip'] for x in controllers]
        # Record pre-apply uptime of nova-scheduler on every controller so
        # a restart (uptime reset) can be detected after the apply task.
        uptimes = {}
        for controller in controllers:
            with self.env.d_env.get_ssh_to_remote(controller) as remote:
                uptimes[controller] = \
                    utils.get_process_uptime(remote, service_name)
        task = self.fuel_web.client.apply_configuration(cluster_id)

        self.show_step(8)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(9)
        self.show_step(10)

        for controller in controllers:
            with self.env.d_env.get_ssh_to_remote(controller) as remote:
                uptime = utils.get_process_uptime(remote, service_name)
                # BUGFIX: the format arguments were swapped — the message
                # named the controller IP as the service and vice versa.
                asserts.assert_true(uptime <= uptimes[controller],
                                    "Service {0} was not restarted "
                                    "on {1}".format(service_name, controller))
                for configpath, params in structured_config.items():
                    result = remote.open(configpath)
                    conf_for_check = utils.get_ini_config(result)
                    for param in params:
                        utils.check_config(conf_for_check,
                                           configpath,
                                           param['section'],
                                           param['option'],
                                           param['value'])

        self.show_step(11)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        # A 2-vCPU instance presumably fits under the new ratio — it must
        # reach ACTIVE.
        server = os_conn.create_instance(neutron_network=True,
                                         label=net_name,
                                         server_name="Test_reconfig",
                                         vcpus=2)
        os_conn.verify_instance_status(server, 'ACTIVE')
        self.show_step(12)
        # The "overcommit" flavor is expected to exceed the remaining CPU
        # capacity, so scheduling must fail with ERROR.
        excessive_server = os_conn.create_instance(neutron_network=True,
                                                   label=net_name,
                                                   server_name="excessive_VM",
                                                   flavor_name="overcommit")
        os_conn.verify_instance_status(excessive_server, 'ERROR')
        os_conn.delete_instance(excessive_server)
        self.env.make_snapshot("reconfigure_overcommit_ratio",
                               is_make=True)