Example no. 1
    def ceph_multinode_with_cinder(self):
        """Deploy ceph with cinder in simple mode

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 2 nodes with cinder and ceph OSD roles
            5. Deploy the cluster
            6. Check ceph status
            7. Check partitions on controller node

        Snapshot ceph_multinode_with_cinder

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise SkipTest()

        self.env.revert_snapshot("ready")
        self.env.bootstrap_nodes(self.env.nodes().slaves[:4])

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'volumes_ceph': False,
                'images_ceph': True,
                'volumes_lvm': True
            }
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder', 'ceph-osd'],
                'slave-04': ['cinder', 'ceph-osd']
            }
        )
        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)
        check_ceph_health(self.env.get_ssh_to_remote_by_name('slave-01'))

        disks = self.fuel_web.client.get_node_disks(
            self.fuel_web.get_nailgun_node_by_name('slave-01')['id'])

        logger.info("Current disk partitions are: \n{d}".format(d=disks))

        logger.info("Check unallocated space")
        # We expect failure here only for release 5.0 due to bug
        # https://bugs.launchpad.net/fuel/+bug/1306625, so this should be
        # changed to assert_true in the next release.
        assert_false(
            checkers.check_unallocated_space(disks, contr_img_ceph=True),
            "Check unallocated space on controller")

        # Run ostf
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_multinode_with_cinder")
Example no. 2
    def octane_action(self, action, path=None):
        assert_true(action in self.OCTANE_COMMANDS.keys(),
                    "Unknown octane action '{}', aborting".format(action))
        octane_cli_args = {'path': path, 'pwd': KEYSTONE_CREDS['password']}
        admin_remote = self.env.d_env.get_admin_remote()
        if 'backup' in action:
            assert_false(
                admin_remote.exists(path),
                'File {!r} already exists, not able to reuse'.format(path))
        elif 'restore' in action:
            assert_true(
                admin_remote.exists(path),
                'File {!r} does not exist - cannot run restore'.format(path))

        cmd = self.OCTANE_COMMANDS[action].format(**octane_cli_args)

        try:
            admin_remote.check_call(cmd, timeout=60 * 60)
        except (DevopsCalledProcessError, TimeoutError):
            # the snapshot generation procedure may have failed;
            # save the octane log before re-raising
            admin_remote.download(
                "/var/log/octane.log",
                os.path.join(LOGS_DIR,
                             "octane_{}_.log".format(os.path.basename(path))))
            raise

        if 'backup' in action:
            assert_true(
                admin_remote.exists(path),
                "File {!r} was not created after backup command!".format(path))
Example no. 3
    def cli_node_deletion_check(self):
        """Destroy node and remove it from Nailgun using Fuel CLI

        Scenario:
            1. Revert snapshot 'cli_selected_nodes_deploy'
            2. Check 'slave-03' is present
            3. Destroy 'slave-03'
            4. Wait until 'slave-03' becomes offline
            5. Delete offline 'slave-03' from db
            6. Check that 'slave-03' is no longer present

        Duration 30m

        """
        self.env.revert_snapshot("cli_selected_nodes_deploy")

        node_id = self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[2])['id']

        assert_true(
            check_cobbler_node_exists(self.ssh_manager.admin_ip, node_id),
            "node-{0} is not found".format(node_id))
        self.env.d_env.nodes().slaves[2].destroy()
        try:
            wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[2])['online'],
                 timeout=60 * 6)
        except TimeoutError:
            raise
        admin_ip = self.ssh_manager.admin_ip
        cmd = 'fuel node --node-id {0} --delete-from-db'.format(node_id)
        res = self.ssh_manager.execute_on_remote(admin_ip, cmd)
        assert_true(
            res['exit_code'] == 0, "Offline node-{0} was not "
            "deleted from database".format(node_id))

        cmd = "fuel node | awk '{{print $1}}' | grep -w '{0}'".format(node_id)
        try:
            wait(lambda: not self.ssh_manager.execute_on_remote(
                admin_ip, cmd, raise_on_assert=False)['exit_code'] == 0,
                 timeout=60 * 4)
        except TimeoutError:
            raise TimeoutError(
                "After deletion node-{0} is found in fuel list".format(
                    node_id))
        is_cobbler_node_exists = check_cobbler_node_exists(
            self.ssh_manager.admin_ip, node_id)

        assert_false(
            is_cobbler_node_exists,
            "After deletion node-{0} is found in cobbler list".format(node_id))
        cmd = "fuel env | tail -n 1 | awk {'print $1'}"
        cluster_id = self.ssh_manager.execute_on_remote(admin_ip,
                                                        cmd)['stdout_str']

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'],
                               should_fail=1)
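
Example no. 3 relies on a wait-then-assert idiom: poll a predicate until it flips or a timeout expires, then turn the timeout into a descriptive error. The sketch below shows the idiom with a simple wait() helper written for this page (a stand-in for the devops helper used above) and made-up state.

import time


def wait(predicate, timeout=60, interval=1):
    # Poll the predicate until it returns a truthy value or time runs out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise TimeoutError("predicate did not become true in {0}s".format(timeout))


deleted_nodes = {"node-3"}  # illustrative state, not a real Nailgun listing
try:
    wait(lambda: "node-3" in deleted_nodes, timeout=5)
except TimeoutError:
    # Re-raise with a message that names what we were waiting for.
    raise TimeoutError("After deletion node-3 is found in fuel list")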
Example no. 4
    def check_emc_cinder_config(cls, ip, path):
        with SSHManager().open_on_remote(
            ip=ip,
            path=path
        ) as f:
            cinder_conf = configparser.ConfigParser()
            cinder_conf.readfp(f)

        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'volume_driver'),
            'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
            'global')
        asserts.assert_false(
            cinder_conf.getboolean('DEFAULT',
                                   'destroy_empty_storage_group'))
        asserts.assert_true(
            cinder_conf.getboolean('DEFAULT',
                                   'initiator_auto_registration'))
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'default_timeout'), 10)
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'naviseccli_path'),
            '/opt/Navisphere/bin/naviseccli')

        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT',
                                                   'san_secondary_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
Example no. 5
    def octane_action(self, action, path=None):
        assert_true(action in self.OCTANE_COMMANDS.keys(),
                    "Unknown octane action '{}', aborting".format(action))
        octane_cli_args = {
            'path': path,
            'pwd': KEYSTONE_CREDS['password']
        }
        admin_remote = self.env.d_env.get_admin_remote()
        if 'backup' in action:
            assert_false(
                admin_remote.exists(path),
                'File {!r} already exists, not able to reuse'.format(path))
        elif 'restore' in action:
            assert_true(
                admin_remote.exists(path),
                'File {!r} does not exist - cannot run restore'.format(path))

        cmd = self.OCTANE_COMMANDS[action].format(**octane_cli_args)

        try:
            admin_remote.check_call(cmd, timeout=60 * 60)
        except (DevopsCalledProcessError, TimeoutError):
            # the snapshot generation procedure may have failed;
            # save the octane log before re-raising
            admin_remote.download(
                "/var/log/octane.log",
                os.path.join(LOGS_DIR,
                             "octane_{}_.log".format(os.path.basename(path))))
            raise

        if 'backup' in action:
            assert_true(
                admin_remote.exists(path),
                "File {!r} was not created after backup command!".format(path))
Example no. 6
    def check_emc_cinder_config(cls, remote, path):
        command = 'cat {0}'.format(path)
        conf_data = ''.join(remote.execute(command)['stdout'])
        conf_data = cStringIO.StringIO(conf_data)
        cinder_conf = ConfigParser.ConfigParser()
        cinder_conf.readfp(conf_data)

        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'volume_driver'),
            'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
            'global')
        asserts.assert_false(
            cinder_conf.getboolean('DEFAULT',
                                   'destroy_empty_storage_group'))
        asserts.assert_true(
            cinder_conf.getboolean('DEFAULT',
                                   'initiator_auto_registration'))
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'default_timeout'), 10)
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'naviseccli_path'),
            '/opt/Navisphere/bin/naviseccli')

        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT',
                                                   'san_secondary_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
Example no. 7
    def check_emc_cinder_config(cls, ip, path):
        with SSHManager().open_on_remote(ip=ip, path=path) as f:
            cinder_conf = configparser.ConfigParser()
            cinder_conf.readfp(f)

        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'volume_driver'),
            'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
            'global')
        asserts.assert_false(
            cinder_conf.getboolean('DEFAULT', 'destroy_empty_storage_group'))
        asserts.assert_true(
            cinder_conf.getboolean('DEFAULT', 'initiator_auto_registration'))
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
        asserts.assert_equal(cinder_conf.getint('DEFAULT', 'default_timeout'),
                             10)
        asserts.assert_equal(cinder_conf.get('DEFAULT', 'naviseccli_path'),
                             '/opt/Navisphere/bin/naviseccli')

        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
        asserts.assert_true(
            cinder_conf.has_option('DEFAULT', 'san_secondary_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
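
The check_emc_cinder_config examples (no. 4, 6 and 7 above) validate a cinder.conf fetched from a remote node by loading it into ConfigParser and asserting on typed getters. The sketch below runs the same kind of checks against an in-memory INI snippet; the option values are illustrative.

import configparser

conf_text = """
[DEFAULT]
destroy_empty_storage_group = False
initiator_auto_registration = True
default_timeout = 10
naviseccli_path = /opt/Navisphere/bin/naviseccli
"""

cinder_conf = configparser.ConfigParser()
cinder_conf.read_string(conf_text)

# getboolean()/getint() parse the string values, so the checks stay typed.
assert cinder_conf.getboolean('DEFAULT', 'destroy_empty_storage_group') is False
assert cinder_conf.getboolean('DEFAULT', 'initiator_auto_registration') is True
assert cinder_conf.getint('DEFAULT', 'default_timeout') == 10
assert cinder_conf.has_option('DEFAULT', 'naviseccli_path')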
Example no. 8
 def test_root_initially_disabled_details(self):
     """Use instance details to test that root is disabled."""
     instance = self.dbaas.instances.get(instance_info.id)
     assert_true(hasattr(instance, 'rootEnabled'),
                 "Instance has no rootEnabled property.")
     assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.")
     assert_equal(self.root_enabled_timestamp, 'Never')
Example no. 9
 def restart_rabbit_again(self):
     """Now stop and start rabbit, ensuring the agent reconnects."""
     self.rabbit.stop()
     assert_false(self.rabbit.is_alive)
     self.rabbit.reset()
     self.rabbit.start()
     assert_true(self.rabbit.is_alive)
Example no. 10
 def test_create_config_type_twice_fails(self):
     name = "test-delete-config-types"
     restart_required = 1
     data_type = "string"
     max_size = None
     min_size = None
     client = self.admin_client.mgmt_configs
     client.create(
         self.datastore_version_id,
         name,
         restart_required,
         data_type,
         max_size,
         min_size)
     asserts.assert_raises(exceptions.BadRequest,
                           client.create,
                           self.datastore_version_id,
                           name,
                           restart_required,
                           data_type,
                           max_size,
                           min_size)
     client.delete(self.datastore_version_id, name)
     config_list = client.parameters_by_version(self.datastore_version_id)
     asserts.assert_true(name not in [conf.name for conf in config_list])
     # testing that recreate of a deleted parameter works.
     client.create(
         self.datastore_version_id,
         name,
         restart_required,
         data_type,
         max_size,
         min_size)
     config_list = client.parameters_by_version(self.datastore_version_id)
     asserts.assert_false(name not in [conf.name for conf in config_list])
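
Example no. 10 checks that creating the same configuration parameter twice is rejected and that it can be recreated after deletion. Here is a self-contained sketch of that create/conflict/delete/recreate flow using a plain in-memory registry instead of the trove management client; all names are illustrative.

class BadRequest(Exception):
    """Raised on an attempt to create a duplicate parameter."""


class ConfigRegistry(object):
    def __init__(self):
        self._params = {}

    def create(self, name, data_type):
        if name in self._params:
            raise BadRequest("parameter %s already exists" % name)
        self._params[name] = data_type

    def delete(self, name):
        self._params.pop(name, None)


client = ConfigRegistry()
client.create("test-delete-config-types", "string")
try:
    client.create("test-delete-config-types", "string")
    raise AssertionError("duplicate create unexpectedly succeeded")
except BadRequest:
    pass  # expected: the second create must fail
client.delete("test-delete-config-types")
client.create("test-delete-config-types", "string")  # recreate after delete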
Example no. 11
 def test_delete_database(self):
     self.dbaas.databases.delete(instance_info.id, self.dbname_urlencoded)
     if not FAKE:
         time.sleep(5)
     dbs = self.dbaas.databases.list(instance_info.id)
     found = any(result.name == self.dbname_urlencoded for result in dbs)
     assert_false(found, "Database '%s' SHOULD NOT be found in result" % self.dbname_urlencoded)
Example no. 12
def shut_down():
    """Shut down the web service and destroys the database."""
    admin = mymodule.get_admin_client()
    if admin.service_is_up:
        mymodule.stop_web_server()
        assert_false(admin.service_is_up())
    mymodule.destroy_database()
Example no. 13
 def verify_defaults(self, cluster_id):
     """Method designed to verify plugin default values."""
     attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
     assert_false(
         attr['editable'][gcs_settings.plugin_name]['metadata']['enabled'],
         'Plugin should be disabled by default.')
     # attr value is being assigned twice in order to fit PEP8 restriction:
     # lines in file can not be longer than 80 characters.
     attr = attr['editable'][gcs_settings.plugin_name]['metadata']
     attr = attr['versions'][0]
     error_list = []
     for key in gcs_settings.default_values.keys():
         next_key = 'value'
         if key == 'metadata':
             next_key = 'hot_pluggable'
         msg = 'Default value is incorrect, got {} = {} instead of {}'
         try:
             assert_equal(gcs_settings.default_values[key],
                          attr[key][next_key],
                          msg.format(key,
                                     attr[key][next_key],
                                     gcs_settings.default_values[key]))
         except AssertionError as e:
             error_list.append(''.join(('\n', str(e))))
     error_msg = ''.join(error_list)
     assert_equal(len(error_msg), 0, error_msg)
Example no. 14
    def cli_cluster_deletion(self):
        """Delete a cluster using Fuel CLI

        Scenario:
            1. Revert snapshot 'cli_selected_nodes_deploy'
            2. Delete cluster via cli
            3. Check cluster absence in the list

        Duration 25m

        """
        self.env.revert_snapshot("cli_selected_nodes_deploy")

        cluster_id = self.fuel_web.get_last_created_cluster()
        with self.env.d_env.get_admin_remote() as remote:
            res = remote.execute(
                'fuel --env {0} env delete'.format(cluster_id))
        assert_true(res['exit_code'] == 0)

        with self.env.d_env.get_admin_remote() as remote:
            try:
                wait(lambda: remote.execute("fuel env |  awk '{print $1}'"
                                            " |  tail -n 1 | grep '^.$'")[
                                                'exit_code'] == 1,
                     timeout=60 * 6)
            except TimeoutError:
                raise TimeoutError(
                    "cluster {0} was not deleted".format(cluster_id))

        assert_false(
            check_cluster_presence(cluster_id, self.env.postgres_actions),
            "cluster {0} is found".format(cluster_id))
Example no. 15
    def assert_users_list(self, instance_id, expected_user_defs, expected_http_code, limit=2):
        full_list = self.rd_client.users.list(instance_id)
        self.assert_client_code(expected_http_code)
        listed_users = {user.name: user for user in full_list}
        asserts.assert_is_none(full_list.next, "Unexpected pagination in the list.")

        for user_def in expected_user_defs:
            user_name = user_def["name"]
            asserts.assert_true(
                user_name in listed_users, "User not included in the 'user-list' output: %s" % user_name
            )
            self._assert_user_matches(listed_users[user_name], user_def)

        # Check that the system (ignored) users are not included in the output.
        system_users = self.get_system_users()
        asserts.assert_false(
            any(name in listed_users for name in system_users),
            "System users should not be included in the 'user-list' output.",
        )

        # Test list pagination.
        list_page = self.rd_client.users.list(instance_id, limit=limit)
        self.assert_client_code(expected_http_code)

        asserts.assert_true(len(list_page) <= limit)
        asserts.assert_is_not_none(list_page.next, "List page is missing.")
        marker = list_page.next

        self.assert_pagination_match(list_page, full_list, 0, limit)
        self.assert_pagination_match(list_page[-1:], full_list, limit - 1, limit)

        list_page = self.rd_client.users.list(instance_id, marker=marker)
        self.assert_client_code(expected_http_code)
        self.assert_pagination_match(list_page, full_list, limit, len(full_list))
Example no. 16
def shut_down():
    """Shut down the web service and destroys the database."""
    admin = mymodule.get_admin_client()
    if admin.service_is_up:
        mymodule.stop_web_server()
        assert_false(admin.service_is_up())
    mymodule.destroy_database()
Example no. 17
 def test_root_initially_disabled_details(self):
     """Use instance details to test that root is disabled."""
     instance = self.dbaas.instances.get(self.id)
     assert_true(hasattr(instance, 'rootEnabled'),
                 "Instance has no rootEnabled property.")
     assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.")
     assert_equal(self.root_enabled_timestamp, 'Never')
Example no. 18
 def test_sanitation(self):
   """Test dates that are infered through sanitation."""
   edgar_obj = Edgar('WMT')
   try:
     assert_equal(edgar_obj.get_filing_date(date(2009,1,31)), date(2009,4,1))
   except ValueError:
     assert_false(True, 'Unable to find filing date for period ending 2009-01-31.')
Example no. 19
    def cluster_deletion(self):
        """
        Scenario:
            1. Revert snapshot 'prepare_ha_neutron'
            2. Delete cluster via cli
            3. Check cluster absence in the list

        Duration 25m

        """
        self.env.revert_snapshot("prepare_ha_neutron")

        remote = self.env.d_env.get_admin_remote()
        cluster_id = self.fuel_web.get_last_created_cluster()
        assert_true(
            remote.execute('fuel --env {0} env delete'.format(cluster_id))
            ['exit_code'] == 0)
        try:
            wait(lambda:
                 remote.execute(
                     "fuel env |  awk '{print $1}' |  tail -n 1 | grep '^.$'")
                 ['exit_code'] == 1, timeout=60 * 6)
        except TimeoutError:
            raise TimeoutError(
                "cluster {0} was not deleted".format(cluster_id))
        assert_false(
            check_cluster_presence(cluster_id, self.env.postgres_actions),
            "cluster {0} is found".format(cluster_id))
Example no. 20
 def find_flavor_self_href(flavor):
     self_links = [link for link in flavor.links if link['rel'] == 'self']
     asserts.assert_true(len(self_links) > 0, "Flavor had no self href!")
     flavor_href = self_links[0]['href']
     asserts.assert_false(flavor_href is None,
                          "Flavor link self href missing.")
     return flavor_href
Example no. 21
    def test_submit_test_event(self):
        """ Testing POST /EventService/SubmitTestEvent  """
        global task
        # Assume RackHD and the test stack share the same localhost
        server = Httpd(port=int(self.__httpd_port),
                       handler_class=self.EventServiceHandler)
        task = WorkerThread(server, 'httpd')
        worker = WorkerTasks(tasks=[task], func=self.__httpd_start)
        worker.run()
        redfish().test_event(body={})
        worker.wait_for_completion(timeout_sec=60)
        if task.timeout:
            # Otherwise, port-forward RackHD to localhost
            server = Httpd(port=int(HTTPD_PORT),
                           handler_class=self.EventServiceHandler)
            task = WorkerThread(server, 'httpd')
            worker = WorkerTasks(tasks=[task], func=self.__httpd_start)
            worker.run()

            # forward port for services running on a guest host
            session = open_ssh_forward(self.__httpd_port)

            redfish().test_event(body={})
            worker.wait_for_completion(timeout_sec=60)
            session.logout()
        assert_false(task.timeout,
                     message='timeout waiting for task {0}'.format(task.id))
Example no. 22
    def check_emc_cinder_config(cls, ip, path):
        command = 'cat {0}'.format(path)
        conf_data = SSHManager().execute_on_remote(ip, command)['stdout_str']
        conf_data = cStringIO(conf_data)
        cinder_conf = configparser.ConfigParser()
        cinder_conf.readfp(conf_data)

        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'volume_driver'),
            'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
            'global')
        asserts.assert_false(
            cinder_conf.getboolean('DEFAULT', 'destroy_empty_storage_group'))
        asserts.assert_true(
            cinder_conf.getboolean('DEFAULT', 'initiator_auto_registration'))
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
        asserts.assert_equal(cinder_conf.getint('DEFAULT', 'default_timeout'),
                             10)
        asserts.assert_equal(cinder_conf.get('DEFAULT', 'naviseccli_path'),
                             '/opt/Navisphere/bin/naviseccli')

        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
        asserts.assert_true(
            cinder_conf.has_option('DEFAULT', 'san_secondary_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
Example no. 23
    def ceph_multinode_with_cinder(self):
        """Deploy ceph with cinder in simple mode

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 2 nodes with cinder and ceph OSD roles
            5. Deploy the cluster
            6. Check ceph status
            7. Check partitions on controller node

        Snapshot ceph_multinode_with_cinder

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise SkipTest()

        self.env.revert_snapshot("ready")
        self.env.bootstrap_nodes(self.env.nodes().slaves[:4])

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'volumes_ceph': False,
                'images_ceph': True,
                'volumes_lvm': True,
                'tenant': 'ceph2',
                'user': '******',
                'password': '******'
            })
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder', 'ceph-osd'],
                'slave-04': ['cinder', 'ceph-osd']
            })
        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)
        check_ceph_health(self.env.get_ssh_to_remote_by_name('slave-01'))

        disks = self.fuel_web.client.get_node_disks(
            self.fuel_web.get_nailgun_node_by_name('slave-01')['id'])

        logger.info("Current disk partitions are: \n{d}".format(d=disks))

        logger.info("Check unallocated space")
        # We expect failure here only for release 5.0 due to bug
        # https://bugs.launchpad.net/fuel/+bug/1306625, so this should be
        # changed to assert_true in the next release.
        assert_false(
            checkers.check_unallocated_space(disks, contr_img_ceph=True),
            "Check unallocated space on controller")

        # Run ostf
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_multinode_with_cinder")
Example no. 24
 def restart_rabbit_again(self):
     """Now stop and start rabbit, ensuring the agent reconnects."""
     self.rabbit.stop()
     assert_false(self.rabbit.is_alive)
     self.rabbit.reset()
     self.rabbit.start()
     assert_true(self.rabbit.is_alive)
Example no. 25
 def find_flavor_self_href(flavor):
     self_links = [link for link in flavor.links if link['rel'] == 'self']
     asserts.assert_true(len(self_links) > 0, "Flavor had no self href!")
     flavor_href = self_links[0]['href']
     asserts.assert_false(flavor_href is None,
                          "Flavor link self href missing.")
     return flavor_href
Example no. 26
    def negative_manual_cic_maintenance_mode(self):
        """Check negative scenario for manual maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Switch into maintenance mode
            4. Check that the controller does not switch into maintenance mode
            5. Check that the controller becomes available

        Duration 45m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        for nailgun_node in self.env.d_env.nodes().slaves[0:3]:
            with self.fuel_web.get_ssh_for_node(nailgun_node.name) as remote:
                assert_true('True' in check_available_mode(remote),
                            "Maintenance mode is not available")

                logger.info('Disabling maintenance mode for node %s',
                            nailgun_node.name)
                result = remote.execute('umm disable')
                assert_equal(
                    result['exit_code'], 0,
                    'Failed to execute "{0}" on remote host: {1}'.format(
                        'umm disable', result))

                assert_false('True' in check_available_mode(remote),
                             "Maintenance mode should not be available")

                logger.info('Try to execute maintenance mode for node %s',
                            nailgun_node.name)
                result = remote.execute('umm on')
                assert_equal(
                    result['exit_code'], 1,
                    'Failed to execute "{0}" on remote host: {1}'.format(
                        'umm on', result))

            # If maintenance mode were not disabled, the node would have gone
            # to reboot; wait briefly and expect it to stay online.
            time.sleep(30)
            assert_true(
                self.fuel_web.get_nailgun_node_by_devops_node(
                    nailgun_node)['online'],
                'Node {0} should be online after command "umm on"'.format(
                    nailgun_node.name))

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['ha', 'smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 300 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(300)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['ha', 'smoke', 'sanity'])
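
In the maintenance-mode examples, 'umm on' is expected to be refused, so the test asserts on the command's exit code (1) rather than on success. Below is a portable sketch of asserting an expected non-zero exit code, run against a local child process instead of a remote node.

import subprocess
import sys

# A child process that deliberately exits with code 1, standing in for a
# remote command that is expected to fail.
result = subprocess.run([sys.executable, "-c", "import sys; sys.exit(1)"])
assert result.returncode == 1, (
    'Expected exit code 1, got {0}'.format(result.returncode))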
Example no. 27
    def negative_manual_cic_maintenance_mode(self):
        """Check negative scenario for manual maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Switch into maintenance mode
            4. Check that the controller does not switch into maintenance mode
            5. Check that the controller becomes available

        Duration 45m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']

        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        self.ssh_manager.execute_on_remote(
            ip=_ip,
            cmd="umm disable")

        asserts.assert_false('True' in check_available_mode(_ip),
                             "Maintenance mode should not be available")

        logger.info('Try to execute maintenance mode '
                    'for node-{0}'.format(_id))

        self.ssh_manager.execute_on_remote(
            ip=_ip,
            cmd="umm on",
            assert_ec_equal=[1])

        # If maintenance mode were not disabled, the node would have gone
        # to reboot; wait briefly and expect it to stay online.
        time.sleep(30)
        asserts.assert_true(
            self.fuel_web.get_nailgun_node_by_devops_node(dregular_ctrl)
            ['online'],
            'Node-{0} should be online after command "umm on"'.
            format(_id))

        try:
            self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke',
                                                          'sanity'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 300 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(300)
            self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke',
                                                          'sanity'])
Example no. 28
 def remove_bmc_user_test(self):
     """ Testing if decommission workflow removed bmc user from compute nodes"""
     for node in self.__nodes:
         ipmi_resp = self.get_ipmi_user_list(node['id'])
         assert_false(
             BMC_USER in ipmi_resp,
             message='failed to delete bmc user {0} for node id {1}'.format(
                 BMC_USER, node['id']))
Example no. 29
 def test_create_users_list_system(self):
     # tests for users that should not be listed
     users = self.dbaas.users.list(instance_info.id)
     assert_equal(200, self.dbaas.last_http_code)
     for user in self.system_users:
         found = any(result.name == user for result in users)
         msg = "User '%s' SHOULD NOT BE found in result" % user
         assert_false(found, msg)
Example no. 30
 def show_databases(self, user, password):
     cmd = "sudo mysql -h %s -u '%s' -p'%s' -e 'show databases;'" % (instance_info.user_ip, user, password)
     print("Running cmd: %s" % cmd)
     dblist, err = process(cmd)
     print("returned: %s" % dblist)
     if err:
         assert_false(True, err)
     return dblist
Example no. 31
 def setUp(self):
     """Sets up the client."""
     wait_until_scheduler_is_ready()
     test_config.volume_service.stop()
     assert_false(test_config.volume_service.is_running)
     restart_compute_service(["--trove_volume_time_out=%d" % VOLUME_TIME_OUT])
     self.init("TEST_FAIL_VOLUME_")
     self.instance_exists = False
Example no. 32
 def test_create_users_list_system(self):
     # tests for users that should not be listed
     users = self.dbaas.users.list(instance_info.id)
     found = False
     for user in self.system_users:
         found = any(result.name == user for result in users)
         assert_false(found, "User '%s' SHOULD NOT BE found in result" % user)
         found = False
Example no. 33
 def remove_bmc_user_test(self):
     """ Testing if decommission workflow removed bmc user from compute nodes"""
     for node in self.__get_used_nodes():
         ipmi_resp = self.get_ipmi_user_list(node['id'])
         for item in ipmi_resp.splitlines():
             if BMC_USER in item:
                 assert_false('ADMINISTRATOR' in item,
                              message='failed to delete bmc user {0} for node id {1}'.format(BMC_USER, node['id']))
Example no. 34
 def test_create_users_list_system(self):
     #tests for users that should not be listed
     users = self.dbaas.users.list(instance_info.id)
     assert_equal(200, self.dbaas.last_http_code)
     for user in self.system_users:
         found = any(result.name == user for result in users)
         msg = "User '%s' SHOULD NOT BE found in result" % user
         assert_false(found, msg)
Example no. 35
 def test_create_database_list_system(self):
     #Databases that should not be returned in the list
     databases = self.dbaas.databases.list(instance_info.id)
     found = False
     for db in self.system_dbs:
         found = any(result.name == db for result in databases)
         assert_false(found, "Database '%s' SHOULD NOT be found in result" % db)
         found = False
Example no. 36
 def test_symbol_deleted_when_last_position_removed(self):
     """Test that the remove_item method of Portfolio removes unused sybmols."""
     portfolio = Portfolio(100)
     start_date = date(2000, 1, 1)
     p1 = OpenPosition("FOO", start_date, 100, 100)
     portfolio.add_position(p1)
     portfolio.remove_position(p1)
     assert_false(p1.symbol in portfolio.symbols)
Example no. 37
 def find_flavor_and_self_href(self, flavor_id, flavor_manager=None):
     """Given an ID, returns flavor and its self href."""
     flavor_manager = flavor_manager or self.flavors
     asserts.assert_false(flavor_id is None)
     flavor = flavor_manager.get(flavor_id)
     asserts.assert_false(flavor is None)
     flavor_href = self.find_flavor_self_href(flavor)
     return flavor, flavor_href
Example no. 38
 def find_flavor_and_self_href(self, flavor_id, flavor_manager=None):
     """Given an ID, returns flavor and its self href."""
     flavor_manager = flavor_manager or self.flavors
     asserts.assert_false(flavor_id is None)
     flavor = flavor_manager.get(flavor_id)
     asserts.assert_false(flavor is None)
     flavor_href = self.find_flavor_self_href(flavor)
     return flavor, flavor_href
Example no. 39
    def check_mtu_size_between_instances(self, mtu_offset):
        """Check private network mtu size

        Scenario:
            1. Boot two instances on different compute hosts
            2. Ping one from the other with a 1472-byte packet
            3. Ping one from the other with an 8972-byte packet
            4. Ping one from the other with an 8973-byte packet
            5. Ping one from the other with a 14472-byte packet
            6. Delete instances

        """
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        instances = []
        fixed_net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        for hypervisor in self.os_conn.get_hypervisors():
            instances.append(
                self.boot_instance_on_node(hypervisor.hypervisor_hostname,
                                           label=fixed_net_name))

        source_instance = instances[0]
        for destination_instance in instances[1:]:
            asserts.assert_true(
                self.ping_instance_from_instance(
                    source_instance=source_instance,
                    destination_instance=destination_instance,
                    size=1472 - mtu_offset, count=3, net_name=fixed_net_name),
                "Ping response was not received for 1500 bytes package")

            asserts.assert_true(
                self.ping_instance_from_instance(
                    source_instance=source_instance,
                    destination_instance=destination_instance,
                    size=8972 - mtu_offset, count=3, net_name=fixed_net_name),
                "Ping response was not received for 9000 bytes package")

            asserts.assert_false(
                self.ping_instance_from_instance(
                    source_instance=source_instance,
                    destination_instance=destination_instance,
                    size=8973 - mtu_offset, count=3, net_name=fixed_net_name),
                "Ping response was received for 9001 bytes package")

            asserts.assert_false(
                self.ping_instance_from_instance(
                    source_instance=source_instance,
                    destination_instance=destination_instance,
                    size=14472 - mtu_offset, count=3,
                    net_name=fixed_net_name),
                "Ping response was received for 15000 bytes package")

        for instance in instances:
            self.os_conn.delete_instance(instance)
            self.os_conn.verify_srv_deleted(instance)
Example no. 40
def no_client_auth_initially():
    # wc = Webclient()
    # assert_false(wc.is_authenticated())

    mc = Mobileclient()
    assert_false(mc.is_authenticated())

    mm = Musicmanager()
    assert_false(mm.is_authenticated())
Example no. 41
 def delete_volume_and_wait(self, volume, timeout=60):
     self.delete_volume(volume)
     try:
         helpers.wait(lambda: volume not in self.cinder.volumes.list(),
                      timeout=timeout)
     except TimeoutError:
         asserts.assert_false(
             volume in self.cinder.volumes.list(),
             "Volume wasn't deleted in {0} sec".format(timeout))
Example no. 42
 def test_create_database_list_system(self):
     #Databases that should not be returned in the list
     databases = self.dbaas.databases.list(instance_info.id)
     found = False
     for db in self.system_dbs:
         found = any(result.name == db for result in databases)
         msg = "Database '%s' SHOULD NOT be found in result" % db
         assert_false(found, msg)
         found = False
Example no. 43
def session_logout():
    for s in create_sessions():
        s.is_authenticated = True
        old_session = s._rsession
        s.logout()

        assert_false(s.is_authenticated)
        old_session.close.assert_called_once_with()
        assert_is_not(s._rsession, old_session)
Example no. 44
 def show_databases(self, user, password):
     cmd = "sudo mysql -h %s -u '%s' -p'%s' -e 'show databases;'"\
             % (instance_info.get_address(), user, password)
     print("Running cmd: %s" % cmd)
     dblist, err = process(cmd)
     print("returned: %s" % dblist)
     if err:
         assert_false(True, err)
     return dblist
Example no. 45
 def test_delete_database(self):
     self.dbaas.databases.delete(instance_info.id, self.dbname_urlencoded)
     if not FAKE:
         time.sleep(5)
     dbs = self.dbaas.databases.list(instance_info.id)
     found = any(result.name == self.dbname_urlencoded for result in dbs)
     assert_false(
         found, "Database '%s' SHOULD NOT be found in result" %
         self.dbname_urlencoded)
Example no. 46
 def setUp(self):
     """Sets up the client."""
     wait_until_scheduler_is_ready()
     test_config.volume_service.stop()
     assert_false(test_config.volume_service.is_running)
     restart_compute_service(['--reddwarf_volume_time_out=%d'
                              % VOLUME_TIME_OUT])
     self.init("TEST_FAIL_VOLUME_")
     self.instance_exists = False
Example no. 47
    def test_root_initially_disabled(self):
        """Test that root is disabled."""
        enabled = self.dbaas.root.is_root_enabled(self.id)
        assert_equal(200, self.dbaas.last_http_code)

        is_enabled = enabled
        if hasattr(enabled, 'rootEnabled'):
            is_enabled = enabled.rootEnabled
        assert_false(is_enabled, "Root SHOULD NOT be enabled.")
Example no. 48
def get_vz_ip_for_device(instance_id, device):
    """Get the IP of the device within openvz for the specified instance"""
    ip, err = process("""sudo vzctl exec %(instance_id)s ifconfig %(device)s"""
                      """ | awk '/inet addr/{gsub(/addr:/,"");print $2}'""" %
                      locals())
    if err:
        assert_false(True, err)
    else:
        return ip.strip()
Example no. 49
    def negative_manual_cic_maintenance_mode(self):
        """Check negative scenario for manual maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Switch into maintenance mode
            4. Check that the controller does not switch into maintenance mode
            5. Check that the controller becomes available

        Duration 45m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for devops_node in d_ctrls:
            _ip = self.fuel_web.get_nailgun_node_by_name(
                devops_node.name)['ip']
            asserts.assert_true('True' in checkers.check_available_mode(_ip),
                                "Maintenance mode is not available")
            self.ssh_manager.execute_on_remote(ip=_ip, cmd="umm disable")

            asserts.assert_false('True' in checkers.check_available_mode(_ip),
                                 "Maintenance mode should not be available")

            logger.info('Try to execute maintenance mode '
                        'for node {0}'.format(devops_node.name))

            self.ssh_manager.execute_on_remote(ip=_ip,
                                               cmd="umm on",
                                               assert_ec_equal=[1])

            # If maintenance mode were not disabled, the node would have gone
            # to reboot; wait briefly and expect it to stay online.
            time.sleep(30)
            asserts.assert_true(
                self.fuel_web.get_nailgun_node_by_devops_node(
                    devops_node)['online'],
                'Node {0} should be online after command "umm on"'.format(
                    devops_node.name))

            try:
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['ha', 'smoke', 'sanity'])
            except AssertionError:
                logger.debug("Test failed from first probe,"
                             " we sleep 300 second try one more time"
                             " and if it fails again - test will fails ")
                time.sleep(300)
                self.fuel_web.run_ostf(cluster_id,
                                       test_sets=['ha', 'smoke', 'sanity'])
Example no. 50
 def remove_bmc_user_test(self):
     """ Testing if decommission workflow removed bmc user from compute nodes"""
     for node in self.__get_used_nodes():
         ipmi_resp = self.get_ipmi_user_list(node['id'])
         for item in ipmi_resp.splitlines():
             if BMC_USER in item:
                 assert_false(
                     'ADMINISTRATOR' in item,
                     message='failed to delete bmc user {0} for node id {1}'
                     .format(BMC_USER, node['id']))
Example no. 51
    def check_interfaces_config_after_reboot(self):
        network_settings = dict()
        skip_interfaces = {
            r'^pub-base$', r'^vr_pub-base$', r'^vr-base$', r'^mgmt-base$',
            r'^vr-host-base$', r'^mgmt-conntrd$', r'^hapr-host$',
            r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$',
            r'^v_(management|public)$'
        }

        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)

        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                network_settings[node['hostname']] = \
                    get_net_settings(remote, skip_interfaces)

        self.fuel_web.warm_restart_nodes(
            self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes))

        network_settings_changed = False

        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                saved_settings = network_settings[node['hostname']]
                actual_settings = get_net_settings(remote, skip_interfaces)
                if not saved_settings == actual_settings:
                    network_settings_changed = True
                    logger.error('Network settings were changed after reboot '
                                 'on node {0}! '.format(node['hostname']))
                    logger.debug('Network settings before the reboot of slave '
                                 '{0}: {1}'.format(node['hostname'],
                                                   saved_settings))
                    logger.debug('Network settings after the reboot of slave '
                                 '{0}: {1}'.format(node['hostname'],
                                                   actual_settings))

                    for iface in saved_settings:
                        if iface not in actual_settings:
                            logger.error("Interface '{0}' doesn't exist after "
                                         "reboot of '{1}'!".format(
                                             iface, node['hostname']))
                            continue
                        if saved_settings[iface] != actual_settings[iface]:
                            logger.error("Interface '{0}' settings "
                                         "were changed after reboot "
                                         "of '{1}': was  {2}, now "
                                         "{3}.".format(iface, node['hostname'],
                                                       saved_settings[iface],
                                                       actual_settings[iface]))

        assert_false(
            network_settings_changed,
            "Network settings were changed after environment nodes "
            "reboot! Please check logs for details!")
Example no. 52
 def _wait_until_graph_finish(self, graph_name, timevalue):
     self.__graph_name = graph_name
     self.__task = WorkerThread(
         AMQPWorker(queue=QUEUE_GRAPH_FINISH,
                    callbacks=[self.__handle_graph_finish]),
         graph_name)

     def start(worker, id):
         worker.start()

     tasks = WorkerTasks(tasks=[self.__task], func=start)
     tasks.run()
     tasks.wait_for_completion(timeout_sec=timevalue)
     assert_false(self.__task.timeout,
                  message='timeout waiting for task {0}'.format(
                      self.__task.id))
Example no. 53
    def negative_manual_cic_maintenance_mode(self):
        """Check negative scenario for manual maintenance mode

        Scenario:
            1. Revert snapshot
            2. Disable UMM
            3. Switch into maintenance mode
            4. Check that the controller does not switch into maintenance mode
            5. Check that the controller becomes available

        Duration 45m
        """
        self.env.revert_snapshot('cic_maintenance_mode')

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Select a non-primary controller
        regular_ctrl = self.fuel_web.get_nailgun_node_by_name("slave-02")
        dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(
            regular_ctrl)
        _ip = regular_ctrl['ip']
        _id = regular_ctrl['id']

        asserts.assert_true('True' in check_available_mode(_ip),
                            "Maintenance mode is not available")
        self.ssh_manager.check_call(ip=_ip, command="umm disable")

        asserts.assert_false('True' in check_available_mode(_ip),
                             "Maintenance mode should not be available")

        logger.info('Try to execute maintenance mode '
                    'for node-{0}'.format(_id))

        self.ssh_manager.check_call(ip=_ip, command="umm on", expected=[1])

        # If maintenance mode were not disabled, the node would have gone
        # to reboot; wait briefly and expect it to stay online.
        time.sleep(30)
        asserts.assert_true(
            self.fuel_web.get_nailgun_node_by_devops_node(
                dregular_ctrl)['online'],
            'Node-{0} should be online after command "umm on"'.format(_id))

        try:
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['ha', 'smoke', 'sanity'])
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 300 second try one more time"
                         " and if it fails again - test will fails ")
            time.sleep(300)
            self.fuel_web.run_ostf(cluster_id,
                                   test_sets=['ha', 'smoke', 'sanity'])
Example no. 54
def create_dns_entry(id, uuid):
    """Given the instance_Id and it's owner returns the DNS entry."""
    entry_factory = get_dns_entry_factory()
    instance_id = str(id)
    entry = entry_factory.create_entry(instance_id)
    # There is a lot of test code which calls this and then, if the entry
    # is None, does nothing. That's actually how the contract for this class
    # works. But we want to make sure that if the RsDnsDriver is defined in the
    # flags we are returning something other than None and running those tests.
    if should_run_rsdns_tests():
        assert_false(entry is None, "RsDnsDriver needs real entries.")
    return entry