Example #1
    def test_config_download_no_deployment_uuid(self, mock_deployment_data,
                                                mock_config_dict,
                                                mock_deployment_resource_id,
                                                mock_git_init):
        heat = mock.MagicMock()
        self.config = ooo_config.Config(heat)
        stack = mock.MagicMock()
        heat.stacks.get.return_value = stack
        heat.resources.get.return_value = mock.MagicMock()

        stack.outputs = [{
            'output_key': 'RoleNetHostnameMap',
            'output_value': {
                'Controller': {
                    'ctlplane':
                    ['overcloud-controller-0.ctlplane.localdomain']
                },
                'Compute': {
                    'ctlplane': [
                        'overcloud-novacompute-0.ctlplane.localdomain',
                        'overcloud-novacompute-1.ctlplane.localdomain',
                        'overcloud-novacompute-2.ctlplane.localdomain'
                    ]
                }
            }
        }, {
            'output_key': 'ServerIdData',
            'output_value': {
                'server_ids': {
                    'Controller': ['00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'],
                    'Compute': [
                        'a7db3010-a51f-4ae0-a791-2364d629d20d',
                        '8b07cd31-3083-4b88-a433-955f72039e2c',
                        '169b46f8-1965-4d90-a7de-f36fb4a830fe'
                    ]
                }
            }
        }, {
            'output_key': 'RoleNetworkConfigMap',
            'output_value': {}
        }, {
            'output_key': 'RoleGroupVars',
            'output_value': {
                'Controller': {
                    'any_errors_fatal': 'yes',
                    'max_fail_percentage': 15
                },
                'Compute': {
                    'any_errors_fatal': 'yes',
                    'max_fail_percentage': 15
                },
            }
        }]
        deployment_data, configs = self._get_config_data('config_data.yaml')

        # Set the deployment to TripleOSoftwareDeployment for the first
        # deployment
        deployment_data[0].attributes['value']['deployment'] = \
            'TripleOSoftwareDeployment'

        # Set the deployment to '' for the second deployment
        deployment_data[1].attributes['value']['deployment'] = ''

        self.configs = configs
        self.deployments = deployment_data
        mock_deployment_data.return_value = deployment_data
        mock_config_dict.side_effect = self._get_config_dict
        mock_deployment_resource_id.side_effect = self._get_deployment_id

        self.tmp_dir = self.useFixture(fixtures.TempDir()).path
        with warnings.catch_warnings(record=True) as w:
            self.config.download_config(stack, self.tmp_dir)
            assert "Skipping deployment" in str(w[-1].message)
            assert "Skipping deployment" in str(w[-2].message)
Example #2
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        ec2utils.reset_cache()
        # Create an isolated temp dir; only the creation/cleanup side effect
        # is needed here, so the path is not kept.
        self.useFixture(fixtures.TempDir())
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.unit.fake_volume.API')

        def fake_show(meh, context, id, **kwargs):
            return {
                'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'
                }
            }

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # Stub out the notification service so we use the no-op serializer
        # and avoid lazy-load traces with the wrap_exception decorator in
        # the compute service.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.consoleauth = self.start_service('consoleauth')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #3
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        ec2utils.reset_cache()
        vol_tmpdir = self.useFixture(fixtures.TempDir()).path
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API')

        def fake_show(meh, context, id):
            return {
                'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'
                }
            }

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #4
 def test_nbd_no_devices(self):
     tempdir = self.useFixture(fixtures.TempDir()).path
     self.stub_out('nova.virt.disk.mount.nbd.NbdMount._detect_nbd_devices',
                   _fake_detect_nbd_devices_none)
     n = nbd.NbdMount(self.file, tempdir)
     self.assertIsNone(n._allocate_nbd())
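The _fake_detect_nbd_devices_none helper is referenced but not shown in this
listing; a minimal stand-in consistent with how the test uses it (an
assumption, not the verbatim nova helper) simply reports no devices:

    def _fake_detect_nbd_devices_none(self, *args, **kwargs):
        # Pretend the host exposes no /dev/nbdX devices at all, so
        # _allocate_nbd() has nothing to hand out and returns None.
        return []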
Example #5
    def _setup_fakelibvirt(self):
        # So that the _supports_direct_io does the test based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # Put fakelibvirt in place
        if 'libvirt' in sys.modules:
            self.saved_libvirt = sys.modules['libvirt']
        else:
            self.saved_libvirt = None

        import nova.tests.virt.libvirt.fake_imagebackend as fake_imagebackend
        import nova.tests.virt.libvirt.fake_libvirt_utils as fake_libvirt_utils
        import nova.tests.virt.libvirt.fakelibvirt as fakelibvirt

        sys.modules['libvirt'] = fakelibvirt
        import nova.virt.libvirt.driver
        import nova.virt.libvirt.firewall

        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.imagebackend',
                                 fake_imagebackend))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.libvirt',
                                 fakelibvirt))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.libvirt_utils',
                                 fake_libvirt_utils))
        self.useFixture(
            fixtures.MonkeyPatch(
                'nova.virt.libvirt.imagebackend.libvirt_utils',
                fake_libvirt_utils))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.firewall.libvirt',
                                 fakelibvirt))

        self.flags(rescue_image_id="2",
                   rescue_kernel_id="3",
                   rescue_ramdisk_id=None,
                   libvirt_snapshots_directory='./')

        def fake_extend(image, size):
            pass

        def fake_migrateToURI(*a):
            pass

        def fake_make_drive(_self, _path):
            pass

        def fake_get_instance_disk_info(_self,
                                        instance,
                                        xml=None,
                                        block_device_info=None):
            return '[]'

        def fake_delete_instance_files(_self, _instance):
            pass

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       'get_instance_disk_info', fake_get_instance_disk_info)

        self.stubs.Set(nova.virt.libvirt.driver.disk, 'extend', fake_extend)

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       '_delete_instance_files', fake_delete_instance_files)

        # Like the existing fakelibvirt.migrateToURI, do nothing,
        # but don't fail for these tests.
        self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain, 'migrateToURI',
                       fake_migrateToURI)

        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive',
                       fake_make_drive)
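The fixtures.MonkeyPatch pattern used heavily above can be reduced to this
self-contained sketch (the patched attribute is illustrative):

    import time

    import fixtures
    import testtools

    class MonkeyPatchDemo(testtools.TestCase):
        def test_patched_sleep(self):
            # Replace time.sleep for the duration of this test; the fixture
            # restores the original attribute automatically during cleanup.
            self.useFixture(fixtures.MonkeyPatch('time.sleep',
                                                 lambda s: None))
            time.sleep(100)  # returns immediately while the patch is active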
Example #6
 def test_inner_get_dev_no_devices(self):
     tempdir = self.useFixture(fixtures.TempDir()).path
     self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
                    _fake_detect_nbd_devices_none)
     n = nbd.NbdMount(None, tempdir)
     self.assertFalse(n._inner_get_dev())
Example #7
    def _setup_fakelibvirt(self):
        # So that the _supports_direct_io does the test based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # Put fakelibvirt in place
        if 'libvirt' in sys.modules:
            self.saved_libvirt = sys.modules['libvirt']
        else:
            self.saved_libvirt = None

        from nova.tests.unit.virt.libvirt import fake_imagebackend
        from nova.tests.unit.virt.libvirt import fakelibvirt

        from nova.tests.unit.virt.libvirt import fake_os_brick_connector

        self.useFixture(fake_imagebackend.ImageBackendFixture())
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.connector',
                                 fake_os_brick_connector))

        self.useFixture(
            fixtures.MonkeyPatch(
                'nova.virt.libvirt.host.Host._conn_event_thread',
                lambda *args: None))

        self.flags(rescue_image_id="2",
                   rescue_kernel_id="3",
                   rescue_ramdisk_id=None,
                   snapshots_directory='./',
                   sysinfo_serial='none',
                   group='libvirt')

        def fake_wait():
            pass

        def fake_detach_device_with_retry(_self, get_device_conf_func, device,
                                          live, *args, **kwargs):
            # Still calling detach, but instead of returning function
            # that actually checks if device is gone from XML, just continue
            # because XML never gets updated in these tests
            _self.detach_device(get_device_conf_func(device), live=live)
            return fake_wait

        self.stub_out(
            'nova.virt.libvirt.driver.LibvirtDriver.'
            '_get_instance_disk_info_from_config',
            lambda self, guest_config, block_device_info: [])
        self.stub_out('nova.virt.disk.api.extend', lambda image, size: None)
        self.stub_out(
            'nova.virt.libvirt.driver.LibvirtDriver.'
            'delete_instance_files', lambda self, instance: None)
        self.stub_out('nova.virt.libvirt.guest.Guest.detach_device_with_retry',
                      fake_detach_device_with_retry)
        self.stub_out('nova.virt.libvirt.guest.Guest.migrate',
                      lambda self, destination, migrate_uri=None,
                      migrate_disks=None, destination_xml=None, flags=0,
                      bandwidth=0: None)
        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stub_out('nova.virt.configdrive.ConfigDriveBuilder.make_drive',
                      lambda self, path: None)
Example #8
 def test_get_options_from_ini_empty_directory_no_target(self):
     # Test that no config options are loaded when an empty directory is
     # provided as the ini path and no target directory is provided
     ini_directory = self.useFixture(fixtures.TempDir()).path
     self.assertIsNone(bandit._get_options_from_ini(ini_directory, []))
Example #9
 def test_get_options_from_ini_no_ini_path_no_bandit_files(self):
     # Test that no config options are loaded when no ini path is provided
     # and the target directory contains no bandit config files (.bandit)
     target_directory = self.useFixture(fixtures.TempDir()).path
     self.assertIsNone(
         bandit._get_options_from_ini(None, [target_directory]))
Example #10
 def setUp(self):
     super(TestTimeDataBase, self).setUp()
     self.tmp_root = self.useFixture(fixtures.TempDir(
         rootdir=os.environ.get("ZUUL_TEST_ROOT"))
     ).path
     self.db = model.TimeDataBase(self.tmp_root)
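fixtures.TempDir accepts an optional rootdir argument, as used here: when it
is None (e.g. ZUUL_TEST_ROOT is unset) the directory is created in the
platform's usual temp location, otherwise under rootdir. A minimal sketch:

    import fixtures
    import testtools

    class TempDirDemo(testtools.TestCase):
        def test_rootdir(self):
            # rootdir=None falls back to tempfile's default location; a real
            # directory path would place the temp dir underneath it instead.
            tmp = self.useFixture(fixtures.TempDir(rootdir=None)).path
            self.assertTrue(tmp)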
Example #11
 def setUp(self):
     super(TestJobTimeData, self).setUp()
     self.tmp_root = self.useFixture(fixtures.TempDir(
         rootdir=os.environ.get("ZUUL_TEST_ROOT"))
     ).path
Example #12
    def _setUp(self):
        super()._setUp()

        self.core_base_path = self.useFixture(fixtures.TempDir()).path

        binaries_path = os.path.abspath(
            os.path.join(__file__, '..', 'bin', 'elf'))

        new_binaries_path = self.useFixture(fixtures.TempDir()).path
        current_path = os.environ.get('PATH')
        new_path = '{}:{}'.format(new_binaries_path, current_path)
        self.useFixture(fixtures.EnvironmentVariable('PATH', new_path))

        # Copy strip and execstack
        for f in ['strip', 'execstack']:
            shutil.copy(os.path.join(binaries_path, f),
                        os.path.join(new_binaries_path, f))
            os.chmod(os.path.join(new_binaries_path, f), 0o755)

        # Some values in ldd need to be set with core_path
        with open(os.path.join(binaries_path, 'ldd')) as rf:
            with open(os.path.join(new_binaries_path, 'ldd'), 'w') as wf:
                for line in rf.readlines():
                    wf.write(line.replace('{CORE_PATH}', self.core_base_path))
        os.chmod(os.path.join(new_binaries_path, 'ldd'), 0o755)

        # Some values in patchelf need to be set with the patchelf version
        self.patchelf_path = os.path.join(new_binaries_path, 'patchelf')
        with open(os.path.join(binaries_path, 'patchelf')) as rf:
            with open(self.patchelf_path, 'w') as wf:
                for line in rf.readlines():
                    wf.write(line.replace('{VERSION}', self._patchelf_version))
        os.chmod(os.path.join(new_binaries_path, 'patchelf'), 0o755)

        patcher = mock.patch.object(elf.ElfFile,
                                    '_extract',
                                    new_callable=lambda: _fake_elffile_extract)
        patcher.start()
        self.addCleanup(patcher.stop)

        self._elf_files = {
            'fake_elf-2.26':
            elf.ElfFile(path=os.path.join(self.root_path, 'fake_elf-2.26')),
            'fake_elf-2.23':
            elf.ElfFile(path=os.path.join(self.root_path, 'fake_elf-2.23')),
            'fake_elf-1.1':
            elf.ElfFile(path=os.path.join(self.root_path, 'fake_elf-1.1')),
            'fake_elf-static':
            elf.ElfFile(path=os.path.join(self.root_path, 'fake_elf-static')),
            'fake_elf-shared-object':
            elf.ElfFile(
                path=os.path.join(self.root_path, 'fake_elf-shared-object')),
            'fake_elf-bad-ldd':
            elf.ElfFile(path=os.path.join(self.root_path, 'fake_elf-bad-ldd')),
            'fake_elf-bad-patchelf':
            elf.ElfFile(
                path=os.path.join(self.root_path, 'fake_elf-bad-patchelf')),
            'fake_elf-with-core-libs':
            elf.ElfFile(
                path=os.path.join(self.root_path, 'fake_elf-with-core-libs')),
            'fake_elf-with-execstack':
            elf.ElfFile(
                path=os.path.join(self.root_path, 'fake_elf-with-execstack')),
            'fake_elf-with-bad-execstack':
            elf.ElfFile(path=os.path.join(self.root_path,
                                          'fake_elf-with-bad-execstack')),
            'libc.so.6':
            elf.ElfFile(path=os.path.join(self.root_path, 'libc.so.6')),
            'libssl.so.1.0.0':
            elf.ElfFile(path=os.path.join(self.root_path, 'libssl.so.1.0.0')),
        }

        for elf_file in self._elf_files.values():
            with open(elf_file.path, 'wb') as f:
                f.write(b'\x7fELF')
                if elf_file.path.endswith('fake_elf-bad-patchelf'):
                    f.write(b'nointerpreter')

        self.root_libraries = {
            'foo.so.1': os.path.join(self.root_path, 'foo.so.1'),
        }

        for root_library in self.root_libraries.values():
            with open(root_library, 'wb') as f:
                f.write(b'\x7fELF')
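The four bytes written above, b'\x7fELF', are the standard ELF magic number,
which is enough for tools that merely sniff the file type. A quick check of
one of the generated stubs (the path is illustrative):

    with open('fake_elf-2.26', 'rb') as f:
        assert f.read(4) == b'\x7fELF'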
Example #13
 def test_install_without_pbr(self):
     # Test easy-install of a thing that depends on a thing using pbr
     tempdir = self.useFixture(fixtures.TempDir()).path
     # A directory containing sdists of the things we're going to depend on
     # in using-package.
     dist_dir = os.path.join(tempdir, 'distdir')
     os.mkdir(dist_dir)
     self._run_cmd(sys.executable, ('setup.py', 'sdist', '-d', dist_dir),
                   allow_fail=False,
                   cwd=PBR_ROOT)
     # testpkg - this requires a pbr-using package
     test_pkg_dir = os.path.join(tempdir, 'testpkg')
     os.mkdir(test_pkg_dir)
     with open(os.path.join(test_pkg_dir, 'setup.py'), 'wt') as f:
         f.write(
             textwrap.dedent("""\
             #!/usr/bin/env python
             import setuptools
             setuptools.setup(
                 name = 'pkgTest',
                 tests_require = ['pkgReq'],
                 test_suite='pkgReq'
             )
             """))
     with open(os.path.join(test_pkg_dir, 'setup.cfg'), 'wt') as f:
         f.write(
             textwrap.dedent("""\
             [easy_install]
             find_links = %s
             """ % dist_dir))
     repoTest = self.useFixture(TestRepo(test_pkg_dir))
     repoTest.commit()
     # reqpkg - this is a package that requires pbr
     req_pkg_dir = os.path.join(tempdir, 'reqpkg')
     pkg_req_module = os.path.join(req_pkg_dir, 'pkgReq/')
     os.makedirs(pkg_req_module)
     with open(os.path.join(req_pkg_dir, 'setup.py'), 'wt') as f:
         f.write(
             textwrap.dedent("""\
             #!/usr/bin/env python
             import setuptools
             setuptools.setup(
                 setup_requires=['pbr'],
                 pbr=True
             )
             """))
     with open(os.path.join(req_pkg_dir, 'setup.cfg'), 'wt') as f:
         f.write(
             textwrap.dedent("""\
             [metadata]
             name = pkgReq
             """))
     with open(os.path.join(req_pkg_dir, 'requirements.txt'), 'wt') as f:
         f.write(
             textwrap.dedent("""\
             pbr
             """))
     with open(os.path.join(req_pkg_dir, 'pkgReq/__init__.py'), 'wt') as f:
         f.write(
             textwrap.dedent("""\
             print("FakeTest loaded and ran")
             """))
     repoReq = self.useFixture(TestRepo(req_pkg_dir))
     repoReq.commit()
     self._run_cmd(sys.executable, ('setup.py', 'sdist', '-d', dist_dir),
                   allow_fail=False,
                   cwd=req_pkg_dir)
     # A venv to test within
     venv = self.useFixture(Venv('nopbr', install_pbr=False))
     python = venv.python
     # Run the depending script
     self.useFixture(
         base.CapturedSubprocess('nopbr', [python] + ['setup.py', 'test'],
                                 cwd=test_pkg_dir))
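The write-a-dedented-heredoc pattern repeated throughout this test works as
in this standalone sketch (the file name is illustrative):

    import textwrap

    content = textwrap.dedent("""\
        [metadata]
        name = example
        """)
    # The backslash after the opening quotes suppresses the leading newline,
    # so the written file starts directly with '[metadata]'.
    with open('setup.cfg', 'wt') as f:
        f.write(content)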
Example #14
    def test_write_config(self, mock_deployment_data, mock_config_dict):
        heat = mock.MagicMock()
        self.config = ooo_config.Config(heat)
        stack = mock.MagicMock()
        heat.stacks.get.return_value = stack

        stack.outputs = [{
            'output_key': 'RoleNetHostnameMap',
            'output_value': {
                'Controller': {
                    'ctlplane':
                    ['overcloud-controller-0.ctlplane.localdomain']
                },
                'Compute': {
                    'ctlplane': [
                        'overcloud-novacompute-0.ctlplane.localdomain',
                        'overcloud-novacompute-1.ctlplane.localdomain',
                        'overcloud-novacompute-2.ctlplane.localdomain'
                    ]
                }
            }
        }, {
            'output_key': 'ServerIdData',
            'output_value': {
                'server_ids': {
                    'Controller': ['00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'],
                    'Compute': [
                        'a7db3010-a51f-4ae0-a791-2364d629d20d',
                        '8b07cd31-3083-4b88-a433-955f72039e2c',
                        '169b46f8-1965-4d90-a7de-f36fb4a830fe'
                    ]
                }
            }
        }, {
            'output_key': 'RoleData',
            'output_value': {
                'Controller': {
                    'ansible_group_vars': {
                        'chrony_host': 'localhost',
                        'chrony_acl': 'none',
                    }
                }
            }
        }, {
            'output_key': 'RoleGroupVars',
            'output_value': {
                'Controller': {
                    'any_errors_fatal': True,
                    'chrony_host': '192.168.2.1',
                    'chrony_foo': 'bar',
                    'max_fail_percentage': 15
                },
                'Compute': {
                    'any_errors_fatal': True,
                    'max_fail_percentage': 15
                }
            }
        }, {
            'output_key': 'RoleNetworkConfigMap',
            'output_value': {}
        }]
        deployment_data, configs = \
            self._get_config_data('config_data.yaml')
        self.configs = configs
        self.deployments = deployment_data

        stack_data = self.config.fetch_config('overcloud')
        mock_deployment_data.return_value = deployment_data
        mock_config_dict.side_effect = self._get_config_dict
        config_dir = self.useFixture(fixtures.TempDir()).path

        self.config.write_config(stack_data, 'overcloud', config_dir)

        for f in [
                'Controller',
                'Compute',
        ]:
            with open(os.path.join(config_dir, 'group_vars', f)) as fin:
                self.assertEqual(yaml.safe_load(fin.read()),
                                 self._get_yaml_file(f))

        for d in [
                'ControllerHostEntryDeployment', 'NetworkDeployment',
                'MyExtraConfigPost', 'MyPostConfig'
        ]:
            with open(
                    os.path.join(config_dir, 'Controller',
                                 'overcloud-controller-0', d)) as fin:
                self.assertEqual(
                    yaml.safe_load(fin.read()),
                    self._get_yaml_file(
                        os.path.join('overcloud-controller-0', d)))

        for d in [
                'ComputeHostEntryDeployment', 'NetworkDeployment',
                'MyExtraConfigPost'
        ]:
            with open(
                    os.path.join(config_dir, 'Compute',
                                 'overcloud-novacompute-0', d)) as fin:
                self.assertEqual(
                    yaml.safe_load(fin.read()),
                    self._get_yaml_file(
                        os.path.join('overcloud-novacompute-0', d)))

        for d in [
                'ComputeHostEntryDeployment', 'NetworkDeployment',
                'MyExtraConfigPost'
        ]:
            with open(
                    os.path.join(config_dir, 'Compute',
                                 'overcloud-novacompute-1', d)) as fin:
                self.assertEqual(
                    yaml.safe_load(fin.read()),
                    self._get_yaml_file(
                        os.path.join('overcloud-novacompute-1', d)))

        for d in [
                'ComputeHostEntryDeployment', 'NetworkDeployment',
                'MyExtraConfigPost', 'AnsibleDeployment'
        ]:
            with open(
                    os.path.join(config_dir, 'Compute',
                                 'overcloud-novacompute-2', d)) as fin:
                self.assertEqual(
                    yaml.safe_load(fin.read()),
                    self._get_yaml_file(
                        os.path.join('overcloud-novacompute-2', d)))
Example #15
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        self.useFixture(
            nova_fixtures.Timeout(os.environ.get('OS_TEST_TIMEOUT', 0),
                                  self.TIMEOUT_SCALING_FACTOR))

        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(log_fixture.get_logging_handle_error_fixture())

        self.useFixture(nova_fixtures.OutputStreamCapture())

        self.useFixture(nova_fixtures.StandardLogging())

        # NOTE(sdague): because of the way we were using the lock
        # wrapper we ended up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path, group='oslo_concurrency')

        self.useFixture(conf_fixture.ConfFixture(CONF))
        self.useFixture(nova_fixtures.RPCFixture('nova.test'))

        if self.USES_DB:
            self.useFixture(nova_fixtures.Database())
            self.useFixture(nova_fixtures.Database(database='api'))

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON.  We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None

        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())

        self.useFixture(nova_fixtures.PoisonFunctions())

        openstack_driver.DRIVER_CACHE = {}
Example #16
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name: strutils.bool_from_string(
            os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if environ_enabled('OS_LOG_CAPTURE'):
            log_format = '%(levelname)s [%(name)s] %(message)s'
            if environ_enabled('OS_DEBUG'):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(
                fixtures.LoggerFixture(nuke_handlers=False,
                                       format=log_format,
                                       level=level))

        rpc.add_extra_exmods("cinder.tests")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api,
                                 migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
        self.override_config(
            'policy_file',
            os.path.join(
                os.path.abspath(os.path.join(
                    os.path.dirname(__file__),
                    '..',
                )), 'cinder/tests/policy.json'))
Example #17
    def _setUp(self):
        super()._setUp()

        self.core_base_path = self.useFixture(fixtures.TempDir()).path

        binaries_path = os.path.join(get_snapcraft_path(), "tests", "bin",
                                     "elf")

        new_binaries_path = self.useFixture(fixtures.TempDir()).path
        current_path = os.environ.get("PATH")
        new_path = "{}:{}".format(new_binaries_path, current_path)
        self.useFixture(fixtures.EnvironmentVariable("PATH", new_path))

        # Copy strip and execstack
        for f in ["strip", "execstack"]:
            shutil.copy(os.path.join(binaries_path, f),
                        os.path.join(new_binaries_path, f))
            os.chmod(os.path.join(new_binaries_path, f), 0o755)

        # Some values in ldd need to be set with core_path
        with open(os.path.join(binaries_path, "ldd")) as rf:
            with open(os.path.join(new_binaries_path, "ldd"), "w") as wf:
                for line in rf.readlines():
                    wf.write(line.replace("{CORE_PATH}", self.core_base_path))
        os.chmod(os.path.join(new_binaries_path, "ldd"), 0o755)

        # Some values in patchelf need to be set with the patchelf version
        self.patchelf_path = os.path.join(new_binaries_path, "patchelf")
        with open(os.path.join(binaries_path, "patchelf")) as rf:
            with open(self.patchelf_path, "w") as wf:
                for line in rf.readlines():
                    wf.write(line.replace("{VERSION}", self._patchelf_version))
        os.chmod(os.path.join(new_binaries_path, "patchelf"), 0o755)

        patcher = mock.patch.object(
            elf.ElfFile,
            "_extract_attributes",
            new_callable=lambda: _fake_elffile_extract_attributes,
        )
        patcher.start()
        self.addCleanup(patcher.stop)

        self._elf_files = {
            "fake_elf-2.26":
            elf.ElfFile(path=os.path.join(self.root_path, "fake_elf-2.26")),
            "fake_elf-2.23":
            elf.ElfFile(path=os.path.join(self.root_path, "fake_elf-2.23")),
            "fake_elf-1.1":
            elf.ElfFile(path=os.path.join(self.root_path, "fake_elf-1.1")),
            "fake_elf-static":
            elf.ElfFile(path=os.path.join(self.root_path, "fake_elf-static")),
            "fake_elf-shared-object":
            elf.ElfFile(
                path=os.path.join(self.root_path, "fake_elf-shared-object")),
            "fake_elf-with-host-libraries":
            elf.ElfFile(path=os.path.join(self.root_path,
                                          "fake_elf-with-host-libraries")),
            "fake_elf-bad-ldd":
            elf.ElfFile(path=os.path.join(self.root_path, "fake_elf-bad-ldd")),
            "fake_elf-bad-patchelf":
            elf.ElfFile(
                path=os.path.join(self.root_path, "fake_elf-bad-patchelf")),
            "fake_elf-with-core-libs":
            elf.ElfFile(
                path=os.path.join(self.root_path, "fake_elf-with-core-libs")),
            "fake_elf-with-missing-libs":
            elf.ElfFile(path=os.path.join(self.root_path,
                                          "fake_elf-with-missing-libs")),
            "fake_elf-with-execstack":
            elf.ElfFile(
                path=os.path.join(self.root_path, "fake_elf-with-execstack")),
            "fake_elf-with-bad-execstack":
            elf.ElfFile(path=os.path.join(self.root_path,
                                          "fake_elf-with-bad-execstack")),
            "libc.so.6":
            elf.ElfFile(path=os.path.join(self.root_path, "libc.so.6")),
            "libssl.so.1.0.0":
            elf.ElfFile(path=os.path.join(self.root_path, "libssl.so.1.0.0")),
        }

        for elf_file in self._elf_files.values():
            with open(elf_file.path, "wb") as f:
                f.write(b"\x7fELF")
                if elf_file.path.endswith("fake_elf-bad-patchelf"):
                    f.write(b"nointerpreter")

        self.root_libraries = {
            "foo.so.1": os.path.join(self.root_path, "foo.so.1"),
            "moo.so.2": os.path.join(self.root_path, "non-standard",
                                     "moo.so.2"),
        }

        barsnap_elf = os.path.join(self.core_base_path, "barsnap.so.2")
        elf_list = [*self.root_libraries.values(), barsnap_elf]

        for root_library in elf_list:
            os.makedirs(os.path.dirname(root_library), exist_ok=True)
            with open(root_library, "wb") as f:
                f.write(b"\x7fELF")
Example #18
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        p = mock.patch('cinder.rpc.get_notifier',
                       side_effect=self._get_joined_notifier)
        p.start()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name: strutils.bool_from_string(
            os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api,
                                 migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )), self.POLICY_PATH),
                             group='oslo_policy')

        self._disable_osprofiler()
        self._disallow_invalid_uuids()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url',
                             'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)
Example #19
 def test_nbd_no_devices(self):
     tempdir = self.useFixture(fixtures.TempDir()).path
     self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
                    _fake_detect_nbd_devices_none)
     n = nbd.NbdMount(None, tempdir)
     self.assertIsNone(n._allocate_nbd())
Example #20
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        self.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)

        if self.MOCK_WORKER:
            # Mock worker creation for all tests that don't care about it
            clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
            for method in ('create_worker', 'set_worker', 'unset_worker'):
                self.patch(clean_path % method, return_value=None)

        if self.MOCK_TOOZ:
            self.patch('cinder.coordination.Coordinator.start')
            self.patch('cinder.coordination.Coordinator.stop')
            self.patch('cinder.coordination.Coordinator.get_lock')

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_url = 'fake:/'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        # Load oslo_messaging_notifications config group so we can set an
        # override to prevent notifications from being ignored due to the
        # short-circuit mechanism.
        oslo_messaging.get_notification_transport(CONF)
        # We need to use a valid driver for the notifications, so we use the
        # 'test' driver.
        self.override_config('driver', ['test'],
                             group='oslo_messaging_notifications')
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(cinder_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 self.POLICY_PATH),
                             group='oslo_policy')
        self.override_config('resource_query_filters_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 self.RESOURCE_FILTER_PATH))
        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url', 'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)

        if six.PY3:
            # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
            # assertRaisesRegex, but Python 2 does not have the new name. This
            # can be removed once we stop supporting py2 or the new name is
            # added.
            self.assertRaisesRegexp = self.assertRaisesRegex

        # Ensure we have the default tpool size value and we don't carry
        # threads from other test runs.
        tpool.killall()
        tpool._nthreads = 20

        # NOTE(mikal): make sure we don't load a privsep helper accidentally
        self.useFixture(cinder_fixtures.PrivsepNoHelperFixture())
Example #21
 def test_inner_get_dev_no_devices(self):
     tempdir = self.useFixture(fixtures.TempDir()).path
     self.stub_out('nova.virt.disk.mount.nbd.NbdMount._detect_nbd_devices',
                   _fake_detect_nbd_devices_none)
     n = nbd.NbdMount(self.file, tempdir)
     self.assertFalse(n._inner_get_dev())
Example #22
    def _setup_fakelibvirt(self):
        # So that the _supports_direct_io does the test based
        # on the current working directory, instead of the
        # default instances_path which doesn't exist
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # Put fakelibvirt in place
        if 'libvirt' in sys.modules:
            self.saved_libvirt = sys.modules['libvirt']
        else:
            self.saved_libvirt = None

        import nova.tests.unit.virt.libvirt.fake_imagebackend as \
            fake_imagebackend
        import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \
            fake_libvirt_utils
        import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt

        import nova.tests.unit.virt.libvirt.fake_os_brick_connector as \
            fake_os_brick_connector

        sys.modules['libvirt'] = fakelibvirt
        import nova.virt.libvirt.driver
        import nova.virt.libvirt.firewall
        import nova.virt.libvirt.host

        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.imagebackend',
                                 fake_imagebackend))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.libvirt',
                                 fakelibvirt))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.libvirt_utils',
                                 fake_libvirt_utils))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.host.libvirt',
                                 fakelibvirt))
        self.useFixture(
            fixtures.MonkeyPatch(
                'nova.virt.libvirt.imagebackend.libvirt_utils',
                fake_libvirt_utils))
        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.firewall.libvirt',
                                 fakelibvirt))

        self.useFixture(
            fixtures.MonkeyPatch('nova.virt.libvirt.driver.connector',
                                 fake_os_brick_connector))

        fakelibvirt.disable_event_thread(self)

        self.flags(rescue_image_id="2",
                   rescue_kernel_id="3",
                   rescue_ramdisk_id=None,
                   snapshots_directory='./',
                   group='libvirt')

        def fake_extend(image, size):
            pass

        def fake_migrateToURI(*a):
            pass

        def fake_make_drive(_self, _path):
            pass

        def fake_get_instance_disk_info(_self,
                                        instance,
                                        xml=None,
                                        block_device_info=None):
            return '[]'

        def fake_delete_instance_files(_self, _instance):
            pass

        def fake_wait():
            pass

        def fake_detach_device_with_retry(_self,
                                          get_device_conf_func,
                                          device,
                                          persistent,
                                          live,
                                          max_retry_count=7,
                                          inc_sleep_time=2,
                                          max_sleep_time=30):
            # Still calling detach, but instead of returning function
            # that actually checks if device is gone from XML, just continue
            # because XML never gets updated in these tests
            _self.detach_device(get_device_conf_func(device),
                                persistent=persistent,
                                live=live)
            return fake_wait

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       '_get_instance_disk_info', fake_get_instance_disk_info)

        self.stubs.Set(nova.virt.libvirt.driver.disk, 'extend', fake_extend)

        self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
                       'delete_instance_files', fake_delete_instance_files)

        self.stubs.Set(nova.virt.libvirt.guest.Guest,
                       'detach_device_with_retry',
                       fake_detach_device_with_retry)

        # Like the existing fakelibvirt.migrateToURI, this does nothing,
        # but it must not fail for these tests.
        self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain, 'migrateToURI',
                       fake_migrateToURI)

        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive',
                       fake_make_drive)
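
Everything above hinges on fixtures.MonkeyPatch, which replaces a named
attribute for the duration of the test and restores the original on cleanup.
A minimal, self-contained sketch of the same technique (the patched target
here is illustrative, not taken from nova):

    import time

    import fixtures
    import testtools


    class MonkeyPatchExample(testtools.TestCase):
        def test_patched_time(self):
            # Replace time.time for this test only; the fixture puts the
            # real function back automatically when the test ends.
            self.useFixture(fixtures.MonkeyPatch('time.time',
                                                 lambda: 1234.0))
            self.assertEqual(1234.0, time.time())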
Example no. 23
    def test_nbd_no_free_devices(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        n = nbd.NbdMount(self.file, tempdir)
        self.useFixture(
            fixtures.MonkeyPatch('os.path.exists', _fake_exists_all_used))
        self.assertIsNone(n._allocate_nbd())
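
_fake_exists_all_used is referenced but not defined in this snippet. A
plausible sketch, assuming that reporting every checked path (each nbd
device node and its pid file) as existing is enough to make _allocate_nbd
conclude that no device is free:

    def _fake_exists_all_used(path):
        # Every nbd device "exists", and so does its pid file, which marks
        # the device as in use - hence no free device can be allocated.
        return True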
Example no. 24
    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service([],
                                            conf=utils.prepare_conf(),
                                            default_config_files=[],
                                            logging_level=logging.DEBUG,
                                            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()), self.conf.coordination_url)

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all of
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential here to avoid
        # race conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
            ))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.yaml'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 against real AWS when credentials
        # are set; otherwise we fall back to the fake S3 endpoint given by
        # GNOCCHI_STORAGE_HTTP_URL.
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id',
                                   "S3RVER",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key',
                                   "S3RVER",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(
                fixtures.MockPatch('swiftclient.client.Connection',
                                   FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath', tempdir.path, 'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                                   'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") %
                                (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix",
                               str(uuid.uuid4())[:26], "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT)

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(self.coord, self.incoming, self.index,
                              self.storage)
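
The NOTE(jd) comment above describes a pattern worth isolating: when tests
run in parallel, a shared coordination lock serializes a non-reentrant step
such as a schema upgrade. A minimal sketch of the same idea using tooz
directly (the backend URL, member id, and upgrade call are illustrative):

    from tooz import coordination

    coord = coordination.get_coordinator('file:///tmp/locks', b'worker-1')
    coord.start(start_heart=True)
    try:
        # Only one worker at a time gets past this point; the others block
        # until the lock is released.
        with coord.get_lock(b'gnocchi-tests-db-lock'):
            upgrade_database()  # placeholder for the real upgrade call
    finally:
        coord.stop()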
Example no. 25
    def get_new_temp_dir(self):
        """Create a new temporary directory.

        :returns: fixtures.TempDir
        """
        return self.useFixture(fixtures.TempDir())
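
A typical call site, assuming this helper lives on a testtools-based test
case and that os is imported in the module (the test name and file name are
illustrative):

    def test_writes_scratch_file(self):
        scratch = self.get_new_temp_dir()
        # scratch.path is removed automatically when the test finishes.
        with open(os.path.join(scratch.path, 'data.txt'), 'w') as f:
            f.write('hello')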
Example no. 26
    def setUp(self):
        """Run before each test method to initialize test environment."""
        # Ensure BaseTestCase's ConfigureLogging fixture is disabled since
        # we're using our own (StandardLogging).
        with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'):
            super(TestCase, self).setUp()

        # How many of which service we've started. {$service-name: $count}
        self._service_fixture_count = collections.defaultdict(int)

        self.useFixture(nova_fixtures.OpenStackSDKFixture())

        self.useFixture(log_fixture.get_logging_handle_error_fixture())

        self.stdlog = self.useFixture(nova_fixtures.StandardLogging())

        # NOTE(sdague): because of the way we were using the lock
        # wrapper we ended up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path, group='oslo_concurrency')

        self.useFixture(conf_fixture.ConfFixture(CONF))

        if self.STUB_RPC:
            self.useFixture(nova_fixtures.RPCFixture('nova.test'))

            # We cannot set this in the ConfFixture because oslo only
            # registers the notification opts at the first instantiation
            # of a Notifier, which happens only in the RPCFixture.
            CONF.set_default('driver', ['test'],
                             group='oslo_messaging_notifications')

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)
        objects.Service.clear_min_version_cache()

        # NOTE(danms): Reset the cached list of cells
        from nova.compute import api
        api.CELLS = []
        context.CELL_CACHE = {}
        context.CELLS = []

        self.cell_mappings = {}
        self.host_mappings = {}
        # NOTE(danms): If the test claims to want to set up the database
        # itself, then it is responsible for all the mapping stuff too.
        if self.USES_DB:
            # NOTE(danms): Full database setup involves a cell0, cell1,
            # and the relevant mappings.
            self.useFixture(nova_fixtures.Database(database='api'))
            self._setup_cells()
            self.useFixture(nova_fixtures.DefaultFlavorsFixture())
        elif not self.USES_DB_SELF:
            # NOTE(danms): If not using the database, we mock out the
            # mapping stuff and effectively collapse everything to a
            # single cell.
            self.useFixture(nova_fixtures.SingleCellSimple())
            self.useFixture(nova_fixtures.DatabasePoisonFixture())

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())

        self.useFixture(ovo_fixture.StableObjectJsonFixture())

        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON.  We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None

        # Reset the global QEMU version flag.
        images.QEMU_VERSION = None

        # Reset the compute RPC API globals (mostly the _ROUTER).
        compute_rpcapi.reset_globals()

        # TODO(takashin): Remove the MoxStubout fixture after removing the
        # tests which use mox and are related to nova-network in the
        # following files.
        #
        # - nova/tests/unit/api/openstack/compute/test_floating_ips.py
        # - nova/tests/unit/api/openstack/compute/test_security_groups.py
        # - nova/tests/unit/fake_network.py
        # - nova/tests/unit/network/test_manager.py
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())

        self.useFixture(nova_fixtures.PoisonFunctions())

        openstack_driver.DRIVER_CACHE = {}

        self.useFixture(nova_fixtures.ForbidNewLegacyNotificationFixture())

        # NOTE(mikal): make sure we don't load a privsep helper accidentally
        self.useFixture(nova_fixtures.PrivsepNoHelperFixture())
        self.useFixture(mock_fixture.MockAutospecFixture())

        # FIXME(danms): Disable this for all tests by default to avoid breaking
        # any that depend on default/previous ordering
        self.flags(build_failure_weight_multiplier=0.0,
                   group='filter_scheduler')

        # NOTE(melwitt): Reset the cached set of projects
        quota.UID_QFD_POPULATED_CACHE_BY_PROJECT = set()
        quota.UID_QFD_POPULATED_CACHE_ALL = False
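
The StandardLogging fixture stored in self.stdlog is what lets individual
tests assert on log output. A usage sketch for a method on a subclass of
the TestCase above, assuming the fixture exposes the captured text as
stdlog.logger.output and the module defines a LOG (the message is
illustrative):

    def test_warns_about_disk_space(self):
        LOG.warning('disk is almost full')
        # StandardLogging captures log output in memory for assertions.
        self.assertIn('disk is almost full', self.stdlog.logger.output)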
Example no. 27
    def _start_ovsdb_server_and_idls(self):
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        # Start two ovsdb-servers, one each for the OVN NB DB and the OVN
        # SB DB. The ovsdb-server with the OVN SB DB can be used to test
        # the chassis up/down events.
        mgr = self.ovsdb_server_mgr = self.useFixture(
            process.OvsdbServer(self.temp_dir,
                                self.OVS_INSTALL_SHARE_PATH,
                                ovn_nb_db=True,
                                ovn_sb_db=True,
                                protocol=self._ovsdb_protocol))
        set_cfg = cfg.CONF.set_override
        set_cfg('ovn_nb_connection',
                self.ovsdb_server_mgr.get_ovsdb_connection_path(), 'ovn')
        set_cfg('ovn_sb_connection',
                self.ovsdb_server_mgr.get_ovsdb_connection_path(db_type='sb'),
                'ovn')
        set_cfg('ovn_nb_private_key', self.ovsdb_server_mgr.private_key, 'ovn')
        set_cfg('ovn_nb_certificate', self.ovsdb_server_mgr.certificate, 'ovn')
        set_cfg('ovn_nb_ca_cert', self.ovsdb_server_mgr.ca_cert, 'ovn')
        set_cfg('ovn_sb_private_key', self.ovsdb_server_mgr.private_key, 'ovn')
        set_cfg('ovn_sb_certificate', self.ovsdb_server_mgr.certificate, 'ovn')
        set_cfg('ovn_sb_ca_cert', self.ovsdb_server_mgr.ca_cert, 'ovn')

        num_attempts = 0
        # Five seconds should be more than enough for the transactions to
        # complete in these test cases. This also addresses bug #1607639.
        cfg.CONF.set_override('ovsdb_connection_timeout', 5, 'ovn')

        # Create a monitor IDL connection to the OVN NB DB.
        # This monitor IDL connection can be used to
        #   - Verify that the ML2 OVN driver has written to the OVN NB DB
        #     as expected.
        #   - Create and delete resources in OVN NB DB outside of the
        #     ML2 OVN driver scope to test scenarios like ovn_nb_sync.
        while num_attempts < 3:
            try:
                con = self.useFixture(
                    ConnectionFixture(constr=mgr.get_ovsdb_connection_path(),
                                      schema='OVN_Northbound')).connection
                self.nb_api = impl_idl_ovn.OvsdbNbOvnIdl(con)
                break
            except Exception:
                LOG.exception("Error connecting to the OVN_Northbound DB")
                num_attempts += 1
                time.sleep(1)

        num_attempts = 0

        # Create a monitor IDL connection to the OVN SB DB.
        # This monitor IDL connection can be used to
        #  - Create chassis rows
        #  - Update chassis columns etc.
        while num_attempts < 3:
            try:
                con = self.useFixture(
                    ConnectionFixture(
                        constr=mgr.get_ovsdb_connection_path('sb'),
                        schema='OVN_Southbound')).connection
                self.sb_api = impl_idl_ovn.OvsdbSbOvnIdl(con)
                break
            except Exception:
                LOG.exception("Error connecting to the OVN_Southbound DB")
                num_attempts += 1
                time.sleep(1)

        class TriggerCls(mock.MagicMock):
            def trigger(self):
                pass

        trigger_cls = TriggerCls()
        if self.maintenance_worker:
            trigger_cls.trigger.__self__.__class__ = worker.MaintenanceWorker
            cfg.CONF.set_override('neutron_sync_mode', 'off', 'ovn')

        self.addCleanup(self.stop)

        # mech_driver.post_fork_initialize creates the IDL connections
        self.mech_driver.post_fork_initialize(mock.ANY, mock.ANY,
                                              trigger_cls.trigger)
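
The two connection loops above share the same bounded-retry shape; a small
helper capturing that pattern could look like this (a sketch, not part of
the original code):

    import logging
    import time

    LOG = logging.getLogger(__name__)

    def _connect_with_retry(connect, attempts=3, delay=1):
        # Call `connect` up to `attempts` times, sleeping between
        # failures; return the connection, or None if every attempt fails.
        for _ in range(attempts):
            try:
                return connect()
            except Exception:
                LOG.exception("Connection attempt failed")
                time.sleep(delay)
        return None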
Example no. 28
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        self.useFixture(
            nova_fixtures.Timeout(os.environ.get('OS_TEST_TIMEOUT', 0),
                                  self.TIMEOUT_SCALING_FACTOR))

        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(log_fixture.get_logging_handle_error_fixture())

        self.output = nova_fixtures.OutputStreamCapture()
        self.useFixture(self.output)

        self.stdlog = nova_fixtures.StandardLogging()
        self.useFixture(self.stdlog)

        # NOTE(sdague): because of the way we were using the lock
        # wrapper we ended up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path, group='oslo_concurrency')

        self.useFixture(conf_fixture.ConfFixture(CONF))

        if self.STUB_RPC:
            self.useFixture(nova_fixtures.RPCFixture('nova.test'))

        # We cannot set this in the ConfFixture because oslo only registers
        # the notification opts at the first instantiation of a Notifier,
        # which happens only in the RPCFixture.
        CONF.set_default('driver', ['test'],
                         group='oslo_messaging_notifications')

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)
        objects.Service.clear_min_version_cache()

        # NOTE(danms): Reset the cached list of cells
        from nova.compute import api
        api.CELLS = []
        context.CELL_CACHE = {}
        context.CELLS = []

        self.cell_mappings = {}
        self.host_mappings = {}
        # NOTE(danms): If the test claims to want to set up the database
        # itself, then it is responsible for all the mapping stuff too.
        if self.USES_DB:
            # NOTE(danms): Full database setup involves a cell0, cell1,
            # and the relevant mappings.
            self.useFixture(nova_fixtures.Database(database='api'))
            self._setup_cells()
            self.useFixture(nova_fixtures.DefaultFlavorsFixture())
        elif not self.USES_DB_SELF:
            # NOTE(danms): If not using the database, we mock out the
            # mapping stuff and effectively collapse everything to a
            # single cell.
            self.useFixture(nova_fixtures.SingleCellSimple())
            self.useFixture(nova_fixtures.DatabasePoisonFixture())

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())

        self.useFixture(ovo_fixture.StableObjectJsonFixture())

        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON.  We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None

        # Reset the traits sync and rc cache flags
        resource_provider._TRAITS_SYNCED = False
        resource_provider._RC_CACHE = None
        # Reset the global QEMU version flag.
        images.QEMU_VERSION = None

        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())

        self.useFixture(nova_fixtures.PoisonFunctions())

        openstack_driver.DRIVER_CACHE = {}

        self.useFixture(nova_fixtures.ForbidNewLegacyNotificationFixture())

        # NOTE(mikal): make sure we don't load a privsep helper accidentally
        self.useFixture(nova_fixtures.PrivsepNoHelperFixture())
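
The REQUIRES_LOCKING bridge above exists so that code using
oslo.concurrency's external file locks has a lock_path to write to. A
standalone sketch of that configuration (the lock path and lock name are
illustrative):

    from oslo_concurrency import lockutils

    # Point external file locks at a scratch directory.
    lockutils.CONF.set_override('lock_path', '/tmp/test-locks',
                                group='oslo_concurrency')

    @lockutils.synchronized('my-resource', external=True)
    def critical_section():
        # Only one process at a time can run this; the mutual exclusion
        # is enforced via a lock file under lock_path.
        pass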
Example no. 29
    def setUp(self):
        os.environ["EDITOR"] = 'cat'
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        os.chdir(self.tempdir)
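
One caveat with the snippet above: the os.chdir call leaks into later tests
because nothing restores the previous working directory. A safer variant
(a sketch) registers a cleanup first:

    def setUp(self):
        os.environ["EDITOR"] = 'cat'
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        # os.getcwd() is evaluated now, so the cleanup restores whatever
        # directory we were in before the chdir below.
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.tempdir)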
Example no. 30
    def test_config_download_os_apply_config(self, mock_deployment_data,
                                             mock_config_dict,
                                             mock_deployment_resource_id,
                                             mock_git_init):
        heat = mock.MagicMock()
        self.config = ooo_config.Config(heat)
        stack = mock.MagicMock()
        heat.stacks.get.return_value = stack
        heat.resources.get.return_value = mock.MagicMock()
        stack.outputs = [{
            'output_key': 'RoleNetHostnameMap',
            'output_value': {
                'Controller': {
                    'ctlplane':
                    ['overcloud-controller-0.ctlplane.localdomain']
                },
                'Compute': {
                    'ctlplane': [
                        'overcloud-novacompute-0.ctlplane.localdomain',
                        'overcloud-novacompute-1.ctlplane.localdomain',
                        'overcloud-novacompute-2.ctlplane.localdomain'
                    ]
                }
            }
        }, {
            'output_key': 'ServerIdData',
            'output_value': {
                'server_ids': {
                    'Controller': ['00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'],
                    'Compute': [
                        'a7db3010-a51f-4ae0-a791-2364d629d20d',
                        '8b07cd31-3083-4b88-a433-955f72039e2c',
                        '169b46f8-1965-4d90-a7de-f36fb4a830fe'
                    ]
                }
            }
        }, {
            'output_key': 'RoleNetworkConfigMap',
            'output_value': {}
        }, {
            'output_key': 'RoleGroupVars',
            'output_value': {
                'Controller': {
                    'any_errors_fatal': 'yes',
                    'max_fail_percentage': 15
                },
                'Compute': {
                    'any_errors_fatal': 'yes',
                    'max_fail_percentage': 15
                },
            }
        }]
        deployment_data, configs = \
            self._get_config_data('config_data.yaml')

        # Add a group:os-apply-config config and deployment
        config_uuid = str(uuid.uuid4())
        configs[config_uuid] = dict(id=config_uuid,
                                    config=dict(a='a'),
                                    group='os-apply-config',
                                    outputs=[])

        deployment_uuid = str(uuid.uuid4())
        deployment_mock = mock.MagicMock()
        deployment_mock.id = deployment_uuid
        deployment_mock.attributes = dict(
            value=dict(server='00b3a5e1-5e8e-4b55-878b-2fa2271f15ad',
                       deployment=deployment_uuid,
                       config=config_uuid,
                       name='OsApplyConfigDeployment'))
        deployment_data.append(deployment_mock)

        self.configs = configs
        self.deployments = deployment_data
        mock_deployment_data.return_value = deployment_data
        mock_config_dict.side_effect = self._get_config_dict
        mock_deployment_resource_id.side_effect = self._get_deployment_id

        self.tmp_dir = self.useFixture(fixtures.TempDir()).path
        with warnings.catch_warnings(record=True) as w:
            self.config.download_config(stack, self.tmp_dir)
            mock_git_init.assert_called_once_with(self.tmp_dir)
            # Check that we got at least one of the warnings that we
            # expected to be emitted.
            self.assertGreaterEqual(len(w), 1)
            self.assertGreaterEqual(
                len([
                    x for x in w if issubclass(x.category, DeprecationWarning)
                ]), 1)
            self.assertGreaterEqual(
                len([
                    x for x in w if "group:os-apply-config" in str(x.message)
                ]), 1)
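
A final note on the warnings-capture pattern used in these tests: with
warnings.catch_warnings(record=True), the default "once"-style filters can
suppress a warning that was already emitted earlier in the process, so it
is common to reset the filter inside the context. A minimal sketch:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        # Record every warning, even ones already emitted elsewhere.
        warnings.simplefilter("always")
        warnings.warn("example deprecation", DeprecationWarning)
        assert any(issubclass(w.category, DeprecationWarning)
                   for w in caught)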