def queue_must_exist(self):
     """Makes sure the queue exists."""
     rabbit = rpc.Rabbit()
     queue_name = "guestagent.%s" % instance_info.id
     count = rabbit.get_queue_items(queue_name)
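     # A count of None would mean the queue was never created; zero means
     # it exists and currently holds no messages.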
     assert_is_not(count, None)
     assert_equal(count, 0)
Example #2
    def check_emc_cinder_config(cls, remote, path):
        command = 'cat {0}'.format(path)
        conf_data = ''.join(remote.execute(command)['stdout'])
        conf_data = cStringIO.StringIO(conf_data)
        cinder_conf = ConfigParser.ConfigParser()
        cinder_conf.readfp(conf_data)

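        # Fixed option values expected for the EMC VNX iSCSI backend.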
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'volume_driver'),
            'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
            'global')
        asserts.assert_false(
            cinder_conf.getboolean('DEFAULT',
                                   'destroy_empty_storage_group'))
        asserts.assert_true(
            cinder_conf.getboolean('DEFAULT',
                                   'initiator_auto_registration'))
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'default_timeout'), 10)
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'naviseccli_path'),
            '/opt/Navisphere/bin/naviseccli')

        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT',
                                                   'san_secondary_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
Example #3
    def modify_resolv_conf(self, nameservers=None, merge=True):
        if nameservers is None:
            nameservers = []

        resolv_conf = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd='cat /etc/resolv.conf'
        )
        assert_equal(0, resolv_conf['exit_code'],
                     'Executing "{0}" on the admin node has failed with: {1}'
                     .format('cat /etc/resolv.conf', resolv_conf['stderr']))
        if merge:
            nameservers.extend(resolv_conf['stdout'])
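        # Keep only the lines that carry resolv.conf directives.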
        resolv_keys = ['search', 'domain', 'nameserver']
        resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
                             if any(x in ns for x in resolv_keys))
        logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        echo_result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=echo_cmd
        )
        assert_equal(0, echo_result['exit_code'],
                     'Executing "{0}" on the admin node has failed with: {1}'
                     .format(echo_cmd, echo_result['stderr']))
        return resolv_conf['stdout']
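
The helper returns the file's original contents, so a caller can restore DNS
settings afterwards. A hypothetical usage sketch (the call sequence is assumed,
not taken from the source):

    original = self.modify_resolv_conf(['nameserver 8.8.8.8'], merge=False)
    try:
        pass  # run the checks that need the custom nameserver
    finally:
        self.modify_resolv_conf(original, merge=False)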
Example #4
    def test_delete(self):
        if do_not_delete_instance():
            report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was specified, "
                       "skipping delete...")
            raise SkipTest("TESTS_DO_NOT_DELETE_INSTANCE was specified.")
        global dbaas
        if not hasattr(instance_info, "initial_result"):
            raise SkipTest("Instance was never created, skipping test...")
        if WHITE_BOX:
            # Change this code to get the volume using the API.
            # That way we can keep it while keeping it black box.
            admin_context = context.get_admin_context()
            volumes = db.volume_get_all_by_instance(admin_context,
                                                    instance_info.local_id)
            instance_info.volume_id = volumes[0].id
        # Update the report so the logs inside the instance will be saved.
        report.update()
        dbaas.instances.delete(instance_info.id)

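        # Poll until the API raises NotFound; while the instance is still
        # visible it must report SHUTDOWN.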
        attempts = 0
        try:
            time.sleep(1)
            result = True
            while result is not None:
                attempts += 1
                result = dbaas.instances.get(instance_info.id)
                assert_equal(200, dbaas.last_http_code)
                assert_equal("SHUTDOWN", result.status)
        except exceptions.NotFound:
            pass
        except Exception as ex:
            fail("A failure occured when trying to GET instance %s for the %d "
                 "time: %s" % (str(instance_info.id), attempts, str(ex)))
Example #5
 def test_node_create(self):
     """ Testing POST:/api/2.0/nodes/ """
     for n in self.__test_nodes:
         LOG.info('Creating node (name={0})'.format(n.get('name')))
         Api().nodes_post(identifiers=n)
         rsp = self.__client.last_response
         assert_equal(201, rsp.status, message=rsp.reason)
Example #6
    def test_start_instance_with_configuration(self):
        # test that a new instance will apply the configuration on create
        global configuration_instance
        databases = []
        databases.append({"name": "firstdbconfig", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "db2"})
        configuration_instance.databases = databases
        users = []
        users.append({"name": "liteconf", "password": "******",
                      "databases": [{"name": "firstdbconfig"}]})
        configuration_instance.users = users
        configuration_instance.name = "TEST_" + str(uuid.uuid4()) + "_config"
        flavor_href = instance_info.dbaas_flavor_href
        configuration_instance.dbaas_flavor_href = flavor_href
        configuration_instance.volume = instance_info.volume
        configuration_instance.dbaas_datastore = instance_info.dbaas_datastore
        configuration_instance.dbaas_datastore_version = \
            instance_info.dbaas_datastore_version
        configuration_instance.nics = instance_info.nics

        result = instance_info.dbaas.instances.create(
            configuration_instance.name,
            configuration_instance.dbaas_flavor_href,
            configuration_instance.volume,
            configuration_instance.databases,
            configuration_instance.users,
            nics=configuration_instance.nics,
            availability_zone="nova",
            datastore=configuration_instance.dbaas_datastore,
            datastore_version=configuration_instance.dbaas_datastore_version,
            configuration=configuration_href)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal("BUILD", result.status)
        configuration_instance.id = result.id
Example #7
    def test_node_workflows_post(self):
        """Testing node POST:id/workflows"""
        Nodes().api1_1_nodes_get()
        nodes = loads(self.__client.last_response.data)

        for n in nodes:
            if n.get('type') == 'compute':
                Nodes().api1_1_nodes_identifier_workflows_post(
                    n.get('id'), name='Graph.noop-example', body={})
                workflow_id = str(
                    json.loads(self.__client.last_response.data).get("id"))

                # Poll until the workflow leaves the "valid" (queued) state
                # or the timeout is reached.
                sleep_time_increment = 1
                timeout = 16
                for i in range(timeout):
                    time.sleep(sleep_time_increment)
                    Workflows().api1_1_workflows_identifier_get(workflow_id)
                    posted_workflow = json.loads(
                        self.__client.last_response.data)
                    status = posted_workflow.get("_status")
                    LOG.info('Attempting to check the status of the posted '
                             'workflow after {0} sec(s)'.format(i))
                    if status != "valid":
                        break
                    if i == (timeout - 1):
                        LOG.info("Timed out after: " + str(i))

                assert_equal(status, "succeeded")
Example #8
    def post_workflows(self, graph_name, timeout_sec=300, nodes=None,
                       data=None, tasks=None, callback=None, run_now=True):
        # Avoid mutable default arguments; they are shared between calls.
        data = data if data is not None else {}
        tasks = tasks if tasks is not None else []
        self.__graph_name = graph_name
        self.__graph_status = []

        # The nodes argument is ignored; the current node list is always
        # fetched from the API.
        Api().nodes_get_all()
        nodes = loads(self.__client.last_response.data)

        if callback is None:
            callback = self.handle_graph_finish

        for n in nodes:
            if n.get('type') == 'compute':
                id = n.get('id')
                assert_not_equal(id, None)
                LOG.info('starting amqp listener for node {0}'.format(id))
                worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                                    callbacks=[callback])
                thread = WorkerThread(worker, id)
                self.__tasks.append(thread)
                tasks.append(thread)
                # Cancel any workflow already active on the node; a 404 just
                # means there was nothing to cancel.
                try:
                    Api().nodes_workflow_action_by_id(id, {'command': 'cancel'})
                except ApiException as e:
                    assert_equal(404, e.status, message='status should be 404')
                except (TypeError, ValueError) as e:
                    assert e.message
                Api().nodes_post_workflow_by_id(id, name=self.__graph_name,
                                                body=data)

        if run_now:
            self.run_workflow_tasks(self.__tasks, timeout_sec)
Example #9
 def test_expected_get_configuration_parameter(self):
     # tests get on a single parameter to verify it has expected attributes
     param_name = 'key_buffer_size'
     allowed_config_params = ['name', 'restart_required',
                              'max', 'min', 'type',
                              'deleted', 'deleted_at',
                              'datastore_version_id']
     param = instance_info.dbaas.configuration_parameters.get_parameter(
         instance_info.dbaas_datastore,
         instance_info.dbaas_datastore_version,
         param_name)
     resp, body = instance_info.dbaas.client.last_response
     print("params: %s" % param)
     print("resp: %s" % resp)
     print("body: %s" % body)
     attrcheck = AttrCheck()
     config_parameter_dict = json.loads(body)
     print("config_parameter_dict: %s" % config_parameter_dict)
     attrcheck.contains_allowed_attrs(
         config_parameter_dict,
         allowed_config_params,
         msg="Get Configuration parameter")
     assert_equal(param_name, config_parameter_dict['name'])
     with TypeCheck('ConfigurationParameter', param) as parameter:
         parameter.has_field('name', six.string_types)
         parameter.has_field('restart_required', bool)
         parameter.has_field('max', six.integer_types)
         parameter.has_field('min', six.integer_types)
         parameter.has_field('type', six.string_types)
         parameter.has_field('datastore_version_id', six.text_type)
Example #10
 def delete_backup(self):
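     # snippet() presumably runs the client call, records the request and
     # response for the API docs, and returns one result per client.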
     results = self.snippet(
         "backup_delete",
         "/backups/%s" % self.json_backup.id,
         "DELETE", 202, "Accepted",
         lambda client: client.backups.delete(self.json_backup.id))
     assert_equal(len(results), 1)
Example #11
    def multiple_cluster_net_setup(self):
        """Check master node deployment and configuration with 2 sets of nets

        Scenario:
            1. Revert snapshot with 5 slaves
            2. Check that slaves got IPs via DHCP from both admin/pxe networks
            3. Make environment snapshot
        Duration 6m
        Snapshot multiple_cluster_net_setup

        """

        if not MULTIPLE_NETWORKS:
            raise SkipTest()
        self.env.revert_snapshot("ready_with_5_slaves")

        # Get network parts of IP addresses with /24 netmask
        networks = [
            ".".join(self.env.get_network(n).split(".")[0:-1]) for n in [self.env.admin_net, self.env.admin_net2]
        ]
        nodes_addresses = [".".join(node["ip"].split(".")[0:-1]) for node in self.fuel_web.client.list_nodes()]

        assert_equal(
            set(networks),
            set(nodes_addresses),
            "Only one admin network is used for discovering slaves:" " '{0}'".format(set(nodes_addresses)),
        )

        self.env.make_snapshot("multiple_cluster_net_setup", is_make=True)
Example #12
 def get_backups_for_instance(self):
     results = self.snippet(
         "backups_by_instance",
         "/instances/%s/backups" % json_instance.id,
         "GET", 200, "OK",
         lambda client: client.instances.backups(json_instance.id))
     assert_equal(len(results), 1)
Example #13
 def create_instance(client, name, backup):
     instance = client.instances.create(
         name, 1,
         volume={'size': 2},
         restorePoint={'backupRef': backup})
     assert_equal(instance.status, "BUILD")
     return instance
Example #14
 def get_instance_details(self):
     results = self.snippet(
         "instance_status_detail",
         "/instances/%s" % json_instance.id,
         "GET", 200, "OK",
         lambda client: client.instances.get(json_instance.id))
     assert_equal(results[JSON_INDEX].id, json_instance.id)
Example #15
 def get_backup(self):
     results = self.snippet(
         "backup_get",
         "/backups/%s" % self.json_backup.id,
         "GET", 200, "OK",
         lambda client: client.backups.get(self.json_backup.id))
     assert_equal(len(results), 1)
Example #16
 def get_check_root_access(self):
     results = self.snippet(
         "check_root_user",
         "/instances/%s/root" % json_instance.id,
         "GET", 200, "OK",
         lambda client: client.root.is_root_enabled(json_instance.id))
     assert_equal(results[JSON_INDEX].rootEnabled, True)
Example #17
 def get_list_instance_index(self):
     results = self.snippet(
         "instances_index",
         "/instances", "GET", 200, "OK",
         lambda client: client.instances.list())
     for result in results:
         assert_equal(1, len(result))
Example #18
 def test_read(self):
     try:
         spam = self.api.spam.get(self.spam.id)
         assert_is_none(self.read)
         assert_equal(spam, self.spam)
     except SpamHttpException as she:
         assert_equal(she.status_code, self.read)
Example #19
 def an_instance_is_not_active(self):
     for instance in self.instances:
         instance = self.client.instances.get(instance.id)
         if instance.status != "ACTIVE":
             assert_equal(instance.status, "BUILD")
             return True
     return False
Example #20
File: test_cli.py Project: anbangr/fuel-qa
 def assert_cli_task_success(
         self, task, remote, timeout=70 * 60, interval=20):
     logger.info('Wait {timeout} seconds for task: {task}'
                 .format(timeout=timeout, task=task))
     start = time.time()
     try:
         wait(
             lambda: self.get_task(
                 remote, task['id'])['status'] != 'running',
             interval=interval,
             timeout=timeout
         )
     except TimeoutError:
         raise TimeoutError(
             "Waiting timeout {timeout} sec was reached for task: {task}"
             .format(task=task["name"], timeout=timeout))
     took = time.time() - start
     task = self.get_task(remote, task['id'])
     logger.info('Task finished in {took} seconds with the result: {task}'
                 .format(took=took, task=task))
     assert_equal(
         task['status'], 'ready',
         "Task '{name}' has incorrect status. {} != {}".format(
             task['status'], 'ready', name=task["name"]
         )
     )
Example #21
 def test_create(self):
     try:
         self.spam = self.api.spam.create()
         assert_is_none(self.create)
     except SpamHttpException as she:
         self.spam = self.admin_api.spam.create()
         assert_equal(she.status_code, self.create)
Example #22
 def assert_task_success(self, task, timeout=130 * 60, interval=5):
     task = self.task_wait(task, timeout, interval)
     assert_equal(
         task["status"],
         "ready",
         "Task '{name}' has incorrect status. {} != {}".format(task["status"], "ready", name=task["name"]),
     )
Example #23
File: root.py Project: NeCTAR-RC/trove
 def test_reset_root(self):
     if test_config.values['root_timestamp_disabled']:
         raise SkipTest("Enabled timestamp not enabled yet")
     old_ts = self.root_enabled_timestamp
     self._root()
     assert_not_equal(self.root_enabled_timestamp, 'Never')
     assert_equal(self.root_enabled_timestamp, old_ts)
Example #24
File: pxc.py Project: magictour/trove
    def test_instance_delete(self):
        """Tests the instance delete."""
        if not getattr(self, 'instance', None):
            raise SkipTest(
                "Skipping this test since instance is not available.")

        self.rd_client = create_dbaas_client(self.instance.user)
        self.rd_client.instances.delete(self.instance.id)

        asserts.assert_equal(202, self.rd_client.last_http_code)
        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal("SHUTDOWN", test_instance.status)

        def _poll():
            try:
                instance = self.rd_client.instances.get(self.instance.id)
                self.report.log("Instance info %s" % instance._info)
                asserts.assert_equal("SHUTDOWN", instance.status)
                return False
            except exceptions.NotFound:
                self.report.log("Instance has gone.")
                asserts.assert_equal(404, self.rd_client.last_http_code)
                return True

        poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
        self.report.log("Deleted Instance ID: %s " % self.instance.id)
Example #25
File: pxc.py Project: magictour/trove
    def test_create_cluster_successfuly(self):
        valid_request_body = [
            {"flavorRef": self.instance.dbaas_flavor_href,
             "volume": self.instance.volume},
            {"flavorRef": self.instance.dbaas_flavor_href,
             "volume": self.instance.volume}]

        self.cluster = self.rd_client.clusters.create(
            "test_cluster", self.instance.dbaas_datastore,
            self.instance.dbaas_datastore_version,
            instances=valid_request_body)

        with TypeCheck('Cluster', self.cluster) as check:
            check.has_field("id", basestring)
            check.has_field("name", basestring)
            check.has_field("datastore", dict)
            check.has_field("instances", list)
            check.has_field("links", list)
            check.has_field("created", unicode)
            check.has_field("updated", unicode)
            for instance in self.cluster.instances:
                asserts.assert_true(isinstance(instance, dict))
                asserts.assert_is_not_none(instance['id'])
                asserts.assert_is_not_none(instance['links'])
                asserts.assert_is_not_none(instance['name'])
        asserts.assert_equal(200, self.rd_client.last_http_code)
Example #26
    def _delete_instance(self):
        """Deletes an instance.

        This call polls the REST API until NotFound is raised. The entire
        time it also makes sure that the API returns SHUTDOWN.

        """
        # Update the report so the logs inside the instance will be saved.
        report.update()
        self.dbaas.instances.delete(self.id)
        attempts = 0
        try:
            time.sleep(1)
            result = True
            while result is not None:
                time.sleep(2)
                attempts += 1
                result = None
                result = self.dbaas.instances.get(self.id)
                assert_equal(dbaas_mapping[power_state.SHUTDOWN],
                             result.status)
        except exception.NotFound:
            pass
        except NotFound404:
            pass
        except Exception as ex:
            fail("A failure occured when trying to GET instance %s"
                 " for the %d time: %s" % (str(self.id), attempts, str(ex)))
        self._check_vifs_cleaned()
Example #27
    def _assert_status_failure(result):
        """Checks if status==FAILED, plus asserts REST API is in sync.

        The argument is a tuple for the state in the database followed by
        the REST API status for the instance.

        If state is BUILDING this will assert that the REST API result is
        similar, or is FAILED (because the REST API is called after the
        call to the database the status might change in between).

        """
        if result[0].state == power_state.BUILDING:
            assert_true(
                result[1].status == dbaas_mapping[power_state.BUILDING] or
                result[1].status == dbaas_mapping[power_state.FAILED],
                "Result status from API should only be BUILDING or FAILED"
                " at this point but was %s" % result[1].status)
            return False
        else:
            # After building the only valid state is FAILED (because
            # we've destroyed the instance).
            assert_equal(result[0].state, power_state.FAILED)
            # Make sure the REST API agrees.
            assert_equal(result[1].status, dbaas_mapping[power_state.FAILED])
            return True
Example #28
File: root.py Project: NeCTAR-RC/trove
 def test_root_initially_disabled_details(self):
     """Use instance details to test that root is disabled."""
     instance = self.dbaas.instances.get(instance_info.id)
     assert_true(hasattr(instance, 'rootEnabled'),
                 "Instance has no rootEnabled property.")
     assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.")
     assert_equal(self.root_enabled_timestamp, 'Never')
Example #29
 def test_mgmt_ips_associated(self):
     # Test that the management index properly associates an instance with
     # ONLY its IPs.
     mgmt_index = dbaas_admin.management.index()
     # Every instance has exactly one address.
     for instance in mgmt_index:
         assert_equal(1, len(instance.ips))
Example #30
 def assert_task_failed(self, task, timeout=70 * 60, interval=5):
     task = self.task_wait(task, timeout, interval)
     assert_equal(
         "error",
         task["status"],
         "Task '{name}' has incorrect status. {} != {}".format(task["status"], "error", name=task["name"]),
     )
Example #31
 def host(self, result):
     assert_equal(result.host, 'fake_host_1')
Example #32
 def list_backups(self):
     results = self.snippet(
         "backup_list",
         "/backups", "GET", 200, "OK",
         lambda client: client.backups.list())
     assert_equal(len(results), 1)
Example #33
def clean_slate():
    client = create_client(TroveHTTPClient, admin_user)
    client.client.name = "list"
    instances = client.instances.list()
    assert_equal(0, len(instances), "Instance count must be zero.")
Example #34
 def tenant_id(self, result):
     assert_equal(result.tenant_id, conf['normal_user_tenant'])
Example #35
 def task_description(self, result):
     assert_equal(result.task_description, "No tasks for the instance.")
Example #36
 def status(self, result):
     assert_equal("ACTIVE", result.status)
Example #37
 def id(self):
     assert_equal(self.results[JSON_INDEX].id, json_instance.id)
Example #38
 def flavor(self, result):
     # TODO(imsplitbit): remove the coercion when python-troveclient fixes
     # land in the public.
     assert_true(
         int(result.flavor['id']) == 1 or int(result.flavor['id']) == 3)
     assert_equal(len(result.flavor['links']), 2)
Example #39
 def _migrate_up(self, engine, version):
     """Migrate up to a new version of database."""
     migration_api.upgrade(engine, self.REPOSITORY, version)
     assert_equal(version, migration_api.db_version(engine,
                                                    self.REPOSITORY))
Example #40
 def guest_status(self, result):
     assert_equal(result.guest_status['state_description'], 'running')
Example #41
    def remove_controllers(self):
        """Deploy cluster with 3 controllers, remove 2 controllers
           and re-deploy, check hosts and corosync

        Scenario:
            1. Create cluster
            2. Add 3 controllers, 1 compute
            3. Deploy the cluster
            4. Remove 2 controllers
            5. Deploy changes
            6. Run OSTF
            7. Verify networks
            8. Check /etc/hosts that removed nodes aren't present
            9. Check corosync.conf that removed nodes aren't present

        Duration 120m
        Snapshot remove_controllers

        """
        self.env.revert_snapshot("ready_with_5_slaves")
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE)
        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute']
            })
        self.show_step(3)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        hosts = []

        for node_name in ('slave-02', 'slave-03'):
            node = self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.get_node(name=node_name))
            hostname = ''.join(
                self.ssh_manager.execute_on_remote(
                    ip=node['ip'], cmd="hostname")['stdout']).strip()
            hosts.append(hostname)
        logger.debug('hostnames are {}'.format(hosts))
        nodes = {'slave-02': ['controller'], 'slave-03': ['controller']}
        self.show_step(4)
        self.fuel_web.update_nodes(cluster_id, nodes, False, True)
        self.show_step(5)
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)

        node = self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.get_node(name='slave-01'))
        self.show_step(8)
        self.show_step(9)
        for host in hosts:
            result = self.ssh_manager.execute_on_remote(
                ip=node['ip'], cmd="grep '{}' /etc/hosts".format(host))
            assert_equal(result['exit_code'], 1,
                         "host {} is present in /etc/hosts".format(host))
            result = self.ssh_manager.execute_on_remote(
                ip=node['ip'],
                cmd="grep '{}' /etc/corosync/"
                "corosync.conf".format(host))
            assert_equal(
                result['exit_code'], 1, "host {} is present in"
                " /etc/corosync/corosync.conf".format(host))
        self.env.make_snapshot("remove_controllers")
Example #42
 def deleted(self):
     assert_equal(self.results[JSON_INDEX].deleted, False)
Example #43
 def check_server_config(self):
     """Testing GET:/config to get server configuration"""
     Config().config_get()
     rsp = self.__client.last_response
     assert_equal(200, rsp.status, message=rsp.reason)
Example #44
 def _migrate_down(self, engine, version):
     """Migrate down to an old version of database."""
     migration_api.downgrade(engine, self.REPOSITORY, version)
     assert_equal(version, migration_api.db_version(engine,
                                                    self.REPOSITORY))
Example #45
 def test_delete_database_on_missing_instance(self):
     assert_raises(exceptions.NotFound, self.dbaas.databases.delete, -1,
                   self.dbname_urlencoded)
     assert_equal(404, self.dbaas.last_http_code)
Example #46
 def test_case(txn_list, selected_master):
     with patch.object(self.manager,
                       '_get_replica_txns',
                       return_value=txn_list):
         result = self.manager._most_current_replica(master, None)
         assert_equal(result, selected_master)
Example #47
 def execute_remote_cmd(self, remote, cmd, exit_code=0):
     result = remote.execute(cmd)
     assert_equal(
         result['exit_code'], exit_code,
         'Failed to execute "{0}" on remote host: {1}'.format(cmd, result))
     return result['stdout']
Example #48
 def test_invalid_database_name(self):
     databases = []
     databases.append({"name": "sdfsd,"})
     assert_raises(exceptions.BadRequest, self.dbaas.databases.create,
                   instance_info.id, databases)
     assert_equal(400, self.dbaas.last_http_code)
Example #49
    def bonding_conf_consistency(self):
        """Verify that network configuration with bonds is consistent\
         after deployment failure

        Scenario:
            1. Create an environment
            2. Add 3 nodes with controller role
            3. Add 1 node with compute role
            4. Setup bonding for all interfaces (including admin interface
               bonding)
            5. Run network verification
            6. Update 'connectivity_tests' puppet manifest to cause the\
               deployment process fail right after 'netconfig' task is finished
            7. Start deployment and wait until it fails
            8. Verify that interfaces are not lost from the configured bonds
            9. Restore the initial version of 'connectivity_tests' manifest
            10. Redeploy the cluster and run basic health checks
            11. Run network verification

        Duration 120m
        Snapshot bonding_conf_consistency
        """

        self.env.revert_snapshot("ready_with_5_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
            }
        )

        self.show_step(4)
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces_dict=deepcopy(self.INTERFACES),
                raw_data=deepcopy(self.BOND_CONFIG)
            )

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        # Get ID of a (pending) primary controller
        pending_ctrl_ids = [n['id'] for n in nailgun_nodes
                            if 'controller' in n['pending_roles']]
        node_id = sorted(pending_ctrl_ids)[0]

        # Get interfaces data of the primary controller for which deployment
        # will be forced to fail
        ifaces_data = self.fuel_web.client.get_node_interfaces(node_id)

        self.show_step(6)
        pp_file = ("/etc/puppet/modules/osnailyfacter/modular/netconfig/"
                   "connectivity_tests.pp")
        with self.env.d_env.get_admin_remote() as admin_node:
            # Backup the manifest to be updated for the sake of the test
            backup_cmd = "cp {0} {1}".format(pp_file, pp_file + "_bak")
            res = admin_node.execute(backup_cmd)
            assert_equal(0, res['exit_code'],
                         "Failed to create a backup copy of {0} puppet "
                         "manifest on master node".format(pp_file))

            fail_cmd = ("echo 'fail(\"Emulate deployment failure after "
                        "netconfig!\")' >> {0}".format(pp_file))
            res = admin_node.execute(fail_cmd)
            assert_equal(0, res['exit_code'],
                         "Failed to update {0} puppet manifest "
                         "on master node".format(pp_file))

        self.show_step(7)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_failed(task)

        # Get interfaces data after deployment failure on
        # the primary controller
        ifaces_data_latest = self.fuel_web.client.get_node_interfaces(node_id)

        self.show_step(8)
        # Bond interfaces are always the last objects in the list being
        # returned by 'get node interfaces' API request.
        # So having 2 bonds on the node under test the last 2 objects
        # in the corresponding list are being examined below
        admin_bond_ifaces = ifaces_data[-1]['slaves']
        admin_bond_ifaces_latest = ifaces_data_latest[-1]['slaves']
        assert_equal(len(admin_bond_ifaces), len(admin_bond_ifaces_latest),
                     "Admin interface bond config is inconsistent; "
                     "interface(s) have disappeared from the bond")
        others_bond_ifaces = ifaces_data[-2]['slaves']
        others_bond_ifaces_latest = ifaces_data_latest[-2]['slaves']
        assert_equal(len(others_bond_ifaces), len(others_bond_ifaces_latest),
                     "Other network interfaces bond config is inconsistent; "
                     "interface(s) have disappeared from the bond")

        self.show_step(9)
        with self.env.d_env.get_admin_remote() as admin_node:
            restore_cmd = "cp {0} {1}".format(pp_file + "_bak", pp_file)
            res = admin_node.execute(restore_cmd)
            assert_equal(0, res['exit_code'],
                         "Failed to restore the backup copy of {0} puppet "
                         "manifest on master node".format(pp_file))

        self.show_step(10)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.env.make_snapshot("bonding_conf_consistency")
Example #50
 def test_cannot_delete_taboo_database_names(self):
     for name in self.system_dbs:
         assert_raises(exceptions.BadRequest, self.dbaas.databases.delete,
                       instance_info.id, name)
         assert_equal(400, self.dbaas.last_http_code)
Example #51
 def test_instance_has_new_flavor_after_resize(self):
     actual = self.instance.flavor['id']
     asserts.assert_equal(actual, self.expected_new_flavor_id)
Example #52
    def check_hugepages_distribution_per_numa(self):
        """Deploy environment with different HugePages allocation

        Scenario:
            1. Revert basic_env_for_hugepages snapshot
            2. Configure hugepages for three computes
            3. Deploy cluster
            4. Validate available huge pages on computes

        Snapshot: check_hugepages_distribution_per_numa
        """
        snapshot_name = "check_hugepages_distribution_per_numa"
        self.check_run(snapshot_name)

        self.show_step(1)
        self.env.revert_snapshot("basic_env_for_hugepages")

        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        mixed_host = "slave-01"
        one_gb_host = "slave-02"
        two_mb_host = "slave-03"
        mixed_role_host = "slave-04"

        configs = {
            mixed_host: {
                "cpu_pinning": {
                    "nova": {
                        "value": 2
                    }
                },
                "hugepages": {
                    "nova": {
                        "value": {
                            "2048": 258,
                            "1048576": 1
                        }
                    }
                }
            },
            one_gb_host: {
                "cpu_pinning": {
                    "nova": {
                        "value": 2
                    }
                },
                "hugepages": {
                    "nova": {
                        "value": {
                            "2048": 0,
                            "1048576": 2
                        }
                    }
                }
            },
            two_mb_host: {
                "cpu_pinning": {
                    "nova": {
                        "value": 2
                    }
                },
                "hugepages": {
                    "nova": {
                        "value": {
                            "2048": 540,
                            "1048576": 0
                        }
                    }
                }
            },
            mixed_role_host: {
                "cpu_pinning": {
                    "nova": {
                        "value": 2
                    }
                },
                "hugepages": {
                    "nova": {
                        "value": {
                            "2048": 258,
                            "1048576": 1
                        }
                    }
                }
            },
        }

        for compute_name, config in configs.items():
            compute_id = \
                self.fuel_web.get_nailgun_node_by_name(compute_name)['id']
            original_config = \
                self.fuel_web.client.get_node_attributes(compute_id)
            self.fuel_web.client.upload_node_attributes(
                utils.dict_merge(original_config, config), compute_id)

        self.show_step(3)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(4)
        for compute_name, config in configs.items():
            two_mb_count = config["hugepages"]["nova"]["value"]["2048"]
            one_gb_count = config["hugepages"]["nova"]["value"]["1048576"]

            compute = self.fuel_web.get_nailgun_node_by_name(compute_name)
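            # Read the per-NUMA-node hugepage counters from sysfs for both
            # page sizes (2 MB and 1 GB).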
            cmd = ("cat /sys/devices/system/node/node{}/hugepages/"
                   "hugepages-{}kB/nr_hugepages")

            actual_two_mb_count = 0
            actual_one_gb_count = 0

            for numa_node in [0, 1]:
                actual_two_mb_count += int("".join(
                    self.ssh_manager.execute(compute['ip'],
                                             cmd.format(numa_node,
                                                        "2048"))["stdout"]))

                result = "".join(
                    self.ssh_manager.execute(compute['ip'],
                                             cmd.format(numa_node,
                                                        "1048576"))["stdout"])

                result = "0" if not result else result
                actual_one_gb_count += int(result)

            asserts.assert_equal(
                two_mb_count, actual_two_mb_count,
                "Actual number of allocated 2Mb pages is {}, expected {}".
                format(actual_two_mb_count, two_mb_count))
            asserts.assert_equal(
                one_gb_count, actual_one_gb_count,
                "Actual number of allocated 1Gb pages is {}, expected {}".
                format(actual_one_gb_count, one_gb_count))

        self.env.make_snapshot(snapshot_name, is_make=True)
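
The per-node configs above are overlaid onto the existing node attributes via
utils.dict_merge. A minimal sketch of such a recursive merge, assuming it
returns a new dict (the real fuel-qa helper may differ):

    def dict_merge(base, override):
        """Recursively overlay override onto base, returning a new dict."""
        result = dict(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = dict_merge(result[key], value)
            else:
                result[key] = value
        return result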
Example #53
    def offloading_bond_neutron_vlan(self):
        """Verify offloading types for the logical bonded interfaces and
        neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Configure offloading modes for bonded interfaces
            5. Setup offloading types
            6. Run network verification
            7. Deploy the cluster
            8. Run network verification
            9. Verify offloading types for the bonded interfaces
            10. Run OSTF

        Duration 60m
        Snapshot offloading_bond_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            })

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        bond0 = self.get_bond_interfaces(self.BOND_CONFIG, 'bond0')
        bond1 = self.get_bond_interfaces(self.BOND_CONFIG, 'bond1')
        offloadings_1 = []
        offloadings_2 = []
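        # Collect which of the candidate offloading modes are reported on any
        # node's bond member interfaces.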
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node['id'], bond0)
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1.append(name)
            modes = self.fuel_web.get_offloading_modes(node['id'], bond1)
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2.append(name)

        assert_true(len(offloadings_1) > 0, "No offloading types to disable")
        assert_true(len(offloadings_2) > 0, "No offloading types to enable")

        modes = self.prepare_offloading_modes(['bond0'], offloadings_1, False)
        modes += self.prepare_offloading_modes(['bond1'], offloadings_2, True)

        self.show_step(5)
        for node in nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.INTERFACES),
                raw_data=deepcopy(self.BOND_CONFIG))
            for offloading in modes:
                self.fuel_web.update_offloads(node['id'], deepcopy(offloading),
                                              offloading['name'])

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        for node in nodes:
            for eth in bond0:
                for name in offloadings_1:
                    with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
                        result = check_offload(host, eth, name)
                        assert_equal(
                            result, 'off',
                            "Offload type '{0}': '{1}' - node-{2}, {3}".format(
                                name, result, node['id'], eth))
            for eth in bond1:
                for name in offloadings_2:
                    with self.env.d_env.get_ssh_to_remote(node['ip']) as host:
                        result = check_offload(host, eth, name)
                        assert_equal(
                            result, 'on',
                            "Offload type '{0}': '{1}' - node-{2}, {3}".format(
                                name, result, node['id'], eth))

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_bond_neutron_vlan")
Example #54
    def deploy_bonding_neutron_tun(self):
        """Deploy cluster with active-backup bonding and Neutron VXLAN

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup bonding for all interfaces (including admin interface
               bonding)
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Save network configuration from slave nodes
            10. Reboot all environment nodes
            11. Verify that network configuration is the same after reboot
            12. Run network verification
            13. Run OSTF

        Duration 70m
        Snapshot deploy_bonding_neutron_tun
        """

        self.env.revert_snapshot("ready_with_5_slaves")

        segment_type = NEUTRON_SEGMENT['tun']

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            }
        )

        net_params = self.fuel_web.client.get_networks(cluster_id)

        self.show_step(4)
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces_dict=deepcopy(self.INTERFACES),
                raw_data=deepcopy(self.BOND_CONFIG)
            )

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        assert_equal(str(net_params["networking_parameters"]
                         ['segmentation_type']), segment_type)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(9)
        self.show_step(10)
        self.show_step(11)
        self.check_interfaces_config_after_reboot(cluster_id)
        self.fuel_web.assert_ha_services_ready(cluster_id)

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(13)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_bonding_neutron_tun")
Example #55
 def testE(self):
     self.b = "b"
     assert_equal(self.b, "b")
Example #56
 def call_reboot(self):
     self.instance.restart()
     asserts.assert_equal(202, self.dbaas.last_http_code)
Example #57
File: root.py Project: zn-share/trove
 def _root(self):
     global root_password
     self.dbaas.root.create(instance_info.id)
     assert_equal(200, self.dbaas.last_http_code)
     reh = self.dbaas_admin.management.root_enabled_history
     self.root_enabled_timestamp = reh(instance_info.id).enabled
Example #58
 def testF(self):
     self.c = "c"
     assert_equal(self.c, "c")
Example #59
File: root.py Project: zn-share/trove
 def _verify_root_timestamp(self, id):
     reh = self.dbaas_admin.management.root_enabled_history(id)
     timestamp = reh.enabled
     assert_equal(self.root_enabled_timestamp, timestamp)
     assert_equal(id, reh.id)
Example #60
 def testD(self):
     self.a = "a"
     assert_equal(self.a, "a")