def test_strings_and_jsonable_types_differ(self):
    """ Strings and integers hash to different values. """
    int_hash = generation_hash(5)
    str_hash = generation_hash('5')
    self.assertThat(int_hash, Not(Equals(str_hash)))
def test_includes_no_system_information_if_no_default_user(self):
    """Vendor data for a user-owned node carries no 'system_info' key."""
    owner = factory.make_User()
    node = factory.make_Node(owner=owner)
    result = get_vendor_data(node)
    self.assertThat(result, Not(Contains('system_info')))
def test__membership_can_be_tested(self):
    """An ObjectSet reports members present and non-members absent."""
    member = make_name_without_spaces("item")
    non_member = make_name_without_spaces("item")
    objectset = ObjectSet([member])
    self.assertThat(objectset, Contains(member))
    self.assertThat(objectset, Not(Contains(non_member)))
def check_a_does_not_exist():
    # "a" must not be among the names currently being visited.
    matcher = Not(Contains("a"))
    self.assertThat(thread_local.visiting, matcher)
def assertNotIn(self, needle, haystack, message=''):
    """Assert that needle is not in haystack."""
    self.assertThat(haystack, Not(Contains(needle)), message)
def test_list_live_nodes_pagination_and_removal(self):
    """
    list_live_nodes should be able to walk pages to get all live nodes
    and should not have nodes after they are destroyed or stopped.

    Also, _stop_node and start_node should be able to take a node
    off-line and bring it back online.

    Sorry for testing two things in this test. Unfortunately, to verify
    pagination is working there must be two nodes running. Also, to test
    start_node and _stop_node, there must be a second instance started
    up. Since starting and stopping a node is time consuming, I decided
    to combine these into the same test. My apologies to future
    maintainers if this makes debugging failures less pleasant.
    """
    api = gceblockdeviceapi_for_test(self)
    # Set page size to 1 to force pagination after we spin up a second
    # node.
    api = api.set('_page_size', 1)
    gce_fixture = self.useFixture(GCEComputeTestObjects(
        compute=api._operations._compute,
        project=get_machine_project(),
        zone=get_machine_zone()
    ))
    other_instance_name = u"functional-test-" + unicode(uuid4())
    other_instance = gce_fixture.create_instance(other_instance_name)

    def assert_live_nodes(other_expected):
        # This node must always be listed; the other one only while it
        # is running.
        other_matcher = Contains(other_instance_name)
        if not other_expected:
            other_matcher = Not(other_matcher)
        self.assertThat(
            api.list_live_nodes(),
            MatchesAll(
                other_matcher,
                Contains(api.compute_instance_id())
            )
        )

    assert_live_nodes(True)
    api._stop_node(other_instance_name)
    assert_live_nodes(False)
    api.start_node(other_instance_name)
    assert_live_nodes(True)
    other_instance.destroy()
    assert_live_nodes(False)
def test_clears_all_objects(self):
    """clear_full_storage_configuration deletes every filesystem,
    partition table, partition, volume group, and virtual block device
    on the node, while leaving the physical block devices in place."""
    node = factory.make_Node()
    # Three physical disks to layer different storage objects onto.
    physical_block_devices = [
        factory.make_PhysicalBlockDevice(node=node, size=10 * 1000**3)
        for _ in range(3)
    ]
    # Disk 0: a plain filesystem.
    filesystem = factory.make_Filesystem(
        block_device=physical_block_devices[0])
    # Disk 1: a partition table with one partition.
    partition_table = factory.make_PartitionTable(
        block_device=physical_block_devices[1])
    partition = factory.make_Partition(partition_table=partition_table)
    # Disk 2: an LVM stack — PV -> VG -> two logical volumes.
    fslvm = factory.make_Filesystem(
        block_device=physical_block_devices[2],
        fstype=FILESYSTEM_TYPE.LVM_PV,
    )
    vgroup = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[fslvm])
    vbd1 = factory.make_VirtualBlockDevice(filesystem_group=vgroup,
                                           size=2 * 1000**3)
    vbd2 = factory.make_VirtualBlockDevice(filesystem_group=vgroup,
                                           size=3 * 1000**3)
    # A second-level LVM stack on top of the first logical volume, to
    # confirm nested virtual devices are cleared as well.
    filesystem_on_vbd1 = factory.make_Filesystem(
        block_device=vbd1, fstype=FILESYSTEM_TYPE.LVM_PV)
    vgroup_on_vgroup = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.LVM_VG,
        filesystems=[filesystem_on_vbd1],
    )
    vbd3_on_vbd1 = factory.make_VirtualBlockDevice(
        filesystem_group=vgroup_on_vgroup, size=1 * 1000**3)
    clear_full_storage_configuration(
        node,
        PhysicalBlockDevice=PhysicalBlockDevice,
        VirtualBlockDevice=VirtualBlockDevice,
        PartitionTable=PartitionTable,
        Filesystem=Filesystem,
        FilesystemGroup=FilesystemGroup,
    )
    # The physical disks themselves must survive the clearing.
    for pbd in physical_block_devices:
        self.expectThat(
            reload_object(pbd),
            Not(Is(None)),
            "Physical block device should not have been deleted.",
        )
    # Everything layered on top of the disks must be gone
    # (reload_object returns None for deleted rows).
    self.expectThat(
        reload_object(filesystem),
        Is(None),
        "Filesystem should have been removed.",
    )
    self.expectThat(
        reload_object(partition_table),
        Is(None),
        "PartitionTable should have been removed.",
    )
    self.expectThat(
        reload_object(partition),
        Is(None),
        "Partition should have been removed.",
    )
    self.expectThat(
        reload_object(fslvm),
        Is(None),
        "LVM PV Filesystem should have been removed.",
    )
    self.expectThat(
        reload_object(vgroup),
        Is(None),
        "Volume group should have been removed.",
    )
    self.expectThat(
        reload_object(vbd1),
        Is(None),
        "Virtual block device should have been removed.",
    )
    self.expectThat(
        reload_object(vbd2),
        Is(None),
        "Virtual block device should have been removed.",
    )
    self.expectThat(
        reload_object(filesystem_on_vbd1),
        Is(None),
        "Filesystem on virtual block device should have been removed.",
    )
    self.expectThat(
        reload_object(vgroup_on_vgroup),
        Is(None),
        "Volume group on virtual block device should have been removed.",
    )
    self.expectThat(
        reload_object(vbd3_on_vbd1),
        Is(None),
        "Virtual block device on another virtual block device should have "
        "been removed.",
    )
def test_upgrade_upgrades_CalledProcessError(self):
    """upgrade() converts a CalledProcessError instance in place."""
    exc = factory.make_CalledProcessError()
    self.expectThat(exc, Not(IsInstance(ExternalProcessError)))
    ExternalProcessError.upgrade(exc)
    self.expectThat(exc, IsInstance(ExternalProcessError))
def check_bgp_no_peering(self, router1, router2):
    """Assert that router1 holds no BGP peering reference to router2."""
    read_back = self._vnc_lib.bgp_router_read(fq_name=router1.get_fq_name())
    refs = read_back.get_bgp_router_refs() or []
    peer_names = [ref['to'] for ref in refs]
    self.assertThat(peer_names, Not(Contains(router2.get_fq_name())))
def test_no_system_libraries(self):
    """Priming must not pull the system's libcurl into the snap."""
    self.run_snapcraft(['prime', 'main-no-libs'], 'fake-curl-library')
    # Verify that the system's libcurl was NOT pulled in.
    usr_path = os.path.join(self.prime_dir, 'usr')
    self.assertThat(usr_path, Not(DirExists()))
def test_create_meta_with_app_desktop_key(self):
    """Desktop files named in the 'desktop' app key are copied into
    meta/gui with Exec rewritten to the app command and relative Icon
    paths prefixed with ${SNAP}; the key itself is dropped from
    snap.yaml."""
    os.mkdir(self.prime_dir)
    open(os.path.join(self.prime_dir, 'app.sh'), 'w').close()
    with open(os.path.join(self.prime_dir, 'app1.desktop'), 'w') as f:
        f.write('[Desktop Entry]\nExec=app1.exe\nIcon=app1.png')
    icon_dir = os.path.join(self.prime_dir, 'usr', 'share')
    os.makedirs(icon_dir)
    open(os.path.join(icon_dir, 'app2.png'), 'w').close()
    with open(os.path.join(self.prime_dir, 'app2.desktop'), 'w') as f:
        f.write('[Desktop Entry]\nExec=app2.exe\nIcon=/usr/share/app2.png')
    with open(os.path.join(self.prime_dir, 'app3.desktop'), 'w') as f:
        f.write('[Desktop Entry]\nExec=app3.exe\nIcon=app3.png')
    self.config_data['apps'] = {
        'app1': {'command': 'app.sh', 'desktop': 'app1.desktop'},
        'app2': {'command': 'app.sh', 'desktop': 'app2.desktop'},
        'my-package': {'command': 'app.sh', 'desktop': 'app3.desktop'},
    }
    self.generate_meta_yaml()

    # (generated file name, expected Exec, expected Icon or None when
    # the icon is not checked).
    expectations = (
        ('app1.desktop', 'my-package.app1 %U', 'app1.png'),
        ('app2.desktop', 'my-package.app2 %U',
         '${SNAP}/usr/share/app2.png'),
        ('my-package.desktop', 'my-package %U', None),
    )
    for desktop_name, expected_exec, expected_icon in expectations:
        desktop_file = os.path.join(self.meta_dir, 'gui', desktop_name)
        self.assertThat(desktop_file, FileExists())
        contents = configparser.ConfigParser(interpolation=None)
        contents.read(desktop_file)
        section = 'Desktop Entry'
        self.assertTrue(section in contents)
        self.assertEqual(contents[section].get('Exec'), expected_exec)
        if expected_icon is not None:
            self.assertEqual(contents[section].get('Icon'), expected_icon)

    # The 'desktop' keys must not leak into the final snap.yaml.
    snap_yaml = os.path.join('prime', 'meta', 'snap.yaml')
    self.assertThat(snap_yaml, Not(FileContains('desktop: app1.desktop')))
    self.assertThat(snap_yaml, Not(FileContains('desktop: app2.desktop')))
    self.assertThat(snap_yaml, Not(FileContains('desktop: app3.desktop')))
    self.assertThat(snap_yaml,
                    Not(FileContains('desktop: my-package.desktop')))
def test_runs_locally(self):
    """Running the script locally must produce non-empty output."""
    output = self.run_script().strip()
    self.assertThat(output, Not(HasLength(0)))
def test_list_databases(self):
    """The manager delegates list_databases to MySqlAdmin."""
    dbaas.MySqlAdmin.list_databases = MagicMock(return_value=['database1'])
    result = self.manager.list_databases(self.context)
    self.assertThat(result, Not(Is(None)))
    self.assertThat(result, Equals(['database1']))
    # Delegation happens with default limit/marker/include_marker args.
    dbaas.MySqlAdmin.list_databases.assert_any_call(None, None, False)
def test_can_select_switch(self):
    """Must be able to select the Qml Switch component."""
    switch = self.app.select_single('Switch')
    self.assertThat(switch, Not(Is(None)))
def test_snap(self):
    """End-to-end 'snapcraft snap' over the 'assemble' project: checks
    the snap file, generated wrappers, the command chain runner, and the
    final meta/snap.yaml."""
    self.copy_project_to_cwd("assemble")
    self.run_snapcraft("snap")
    snap_file_path = "assemble_1.0_{}.snap".format(self.deb_arch)
    self.assertThat(snap_file_path, FileExists())
    # The generated wrapper must match the expected fixture content.
    binary1_wrapper_path = os.path.join(self.prime_dir,
                                        "command-assemble-bin.wrapper")
    with open("binary1.after", "r") as file_:
        expected_binary1_wrapper = file_.read()
    self.assertThat(binary1_wrapper_path,
                    FileContains(expected_binary1_wrapper))
    command_chain_path = os.path.join(self.prime_dir, "snap",
                                      "command-chain", "snapcraft-runner")
    with open("command-chain", "r") as f:
        expected_command_chain = f.read()
    self.assertThat(command_chain_path,
                    FileContains(expected_command_chain))
    self.useFixture(
        fixtures.EnvironmentVariable(
            "SNAP", os.path.join(os.getcwd(), self.prime_dir)))
    # Each wrapper, executed via the command chain, emits its marker.
    binary_scenarios = (
        ("command-assemble-service.wrapper", "service-start\n"),
        ("stop-command-assemble-service.wrapper", "service-stop\n"),
        ("command-assemble-bin.wrapper", "binary1\n"),
        ("command-binary2.wrapper", "binary2\n"),
    )
    for binary, expected_output in binary_scenarios:
        output = subprocess.check_output(
            [command_chain_path, os.path.join(self.prime_dir, binary)],
            universal_newlines=True,
        )
        self.assertThat(output, Equals(expected_output))
    # A binary that was deliberately not wrapped must fail to run and
    # must not have gained a wrapper file.
    with testtools.ExpectedException(subprocess.CalledProcessError):
        subprocess.check_output(
            os.path.join(self.prime_dir, "bin", "not-wrapped"),
            stderr=subprocess.STDOUT,
        )
    self.assertThat(
        os.path.join(self.prime_dir, "bin", "not-wrapped.wrapper"),
        Not(FileExists()),
    )
    self.assertThat(
        os.path.join(self.prime_dir, "bin",
                     "command-binary-wrapper-none.wrapper.wrapper"),
        Not(FileExists()),
    )
    # LP: #1750658
    # NOTE(review): the snap.yaml layout below was reconstructed from a
    # whitespace-collapsed source — verify indentation against the
    # actual snapcraft output before relying on an exact match.
    self.assertThat(
        os.path.join(self.prime_dir, "meta", "snap.yaml"),
        FileContains(
            dedent("""\
                name: assemble
                version: 1.0
                summary: one line summary
                description: a longer description
                architectures:
                - {}
                confinement: strict
                grade: stable
                apps:
                  assemble-bin:
                    command: snap/command-chain/snapcraft-runner $SNAP/command-assemble-bin.wrapper
                  assemble-service:
                    command: snap/command-chain/snapcraft-runner $SNAP/command-assemble-service.wrapper
                    daemon: simple
                    stop-command: snap/command-chain/snapcraft-runner $SNAP/stop-command-assemble-service.wrapper
                  binary-wrapper-none:
                    command: subdir/binary3
                  binary2:
                    command: snap/command-chain/snapcraft-runner $SNAP/command-binary2.wrapper
                """).format(self.deb_arch)),
    )
def test_get_manifest_with_unexisting_cargo_lock(self):
    """Without a Cargo.lock the manifest omits cargo-lock-contents."""
    self.plugin.build()
    manifest = self.plugin.get_manifest()
    self.assertThat(manifest, Not(Contains("cargo-lock-contents")))
def test_launch_process_can_set_capture_output(self, popen):
    """capture_output=True must wire up both stderr and stdout."""
    launch_process("testapp", [], capture_output=True)
    popen_kwargs = popen.call_args[1]
    self.assertThat(popen_kwargs['stderr'], Not(Equals(None)))
    self.assertThat(popen_kwargs['stdout'], Not(Equals(None)))
def test_workspace(self):
    """A workspace build produces a manifest with no cargo-lock-contents."""
    self.plugin.build()
    manifest = self.plugin.get_manifest()
    self.assertThat(manifest, Not(Contains("cargo-lock-contents")))
def test_duplicated_calls(self):
    """
    Verify that if every call to the :class:`GCEOperations` is
    duplicated that we handle the errors correctly.

    This should force some specific scheduling situations that resemble
    race conditions with another agent trying to converge to the same
    state, or a condition where the dataset agent as rebooted after a
    crash that happened in the middle of an :class:`IBlockDeviceAPI`
    call.

    In these situations we should verify that the second call to many
    of the underlying atomic methods would result in the correct
    underlying :class:`VolumeException`.
    """
    actual_api = gceblockdeviceapi_for_test(self)
    operations = actual_api._operations
    # Wrap the operations so that every underlying call is issued twice.
    api = actual_api.set(
        '_operations',
        repeat_call_proxy_for(IGCEOperations, operations)
    )
    dataset_id = uuid4()
    # There is no :class:`VolumeException` for creating an already created
    # volume. Thus, GCE just raises its own custom exception in that case.
    self.assertThat(
        lambda: api.create_volume(
            dataset_id=dataset_id,
            size=get_minimum_allocatable_size()
        ),
        Raises(MatchesException(GCEVolumeException))
    )
    # Despite the duplicated create, exactly one matching volume exists.
    volumes = api.list_volumes()
    self.assertThat(
        volumes,
        AnyMatch(MatchesStructure(dataset_id=Equals(dataset_id)))
    )
    volume = next(v for v in volumes if v.dataset_id == dataset_id)
    compute_instance_id = api.compute_instance_id()
    # The second attach of the pair must see the volume already attached.
    self.assertThat(
        lambda: api.attach_volume(
            blockdevice_id=volume.blockdevice_id,
            attach_to=compute_instance_id,
        ),
        Raises(MatchesException(AlreadyAttachedVolume))
    )
    self.assertThat(
        api.get_device_path(volume.blockdevice_id).path,
        Contains('/dev/sd')
    )
    # Detach volume does not error out because we have cleanup code in our
    # acceptance tests that assumes that calls to detach_volume while the
    # volume is already being detached do not error out, and instead block
    # until the volume is detached.
    #
    # With the repeat call proxy, this manifests as neither call reporting
    # the unattached volume, but both calls merely block until the
    # blockdevice is detached.
    api.detach_volume(
        blockdevice_id=volume.blockdevice_id,
    )
    # The duplicated destroy must raise UnknownVolume on the second call.
    self.assertThat(
        lambda: api.destroy_volume(
            blockdevice_id=volume.blockdevice_id,
        ),
        Raises(MatchesException(UnknownVolume))
    )
    self.assertThat(
        api.list_volumes(),
        AllMatch(Not(MatchesStructure(dataset_id=Equals(dataset_id))))
    )
def test_list_databases(self, list_databases_mock):
    """The manager returns exactly what the admin mock reports."""
    result = self.manager.list_databases(self.context)
    self.assertThat(result, Not(Is(None)))
    self.assertThat(result, Equals(list_databases_mock.return_value))
    # Delegation uses default limit/marker/include_marker arguments.
    list_databases_mock.assert_any_call(None, None, False)
def test_io_bytesio(self):
    """io.BytesIO only accepts bytes, so it must get wrapped."""
    bytes_io = io.BytesIO()
    wrapper = unicode_output_stream(bytes_io)
    self.assertThat(bytes_io, Not(Is(wrapper)))
    # Will error if s was not wrapped properly.
    unicode_output_stream(bytes_io).write(_u('foo'))
def test_prime_includes_stage_excludes_fileset(self):
    """Stage fileset exclusions carry over into the prime step."""
    self.run_snapcraft('prime', 'prime-from-stage')
    expectations = (
        ('with-a', Not(FileExists())),
        ('with-b', FileExists()),
        ('with-c', FileExists()),
    )
    for name, matcher in expectations:
        self.assertThat(os.path.join(self.prime_dir, name), matcher)
def assertIsNot(self, expected, observed, message=''):
    """Assert that 'expected' is not 'observed'."""
    self.assertThat(observed, Not(Is(expected)), message)
def test_prime_invalid_part_no_traceback_without_debug(self):
    """Priming an invalid part without --debug prints no traceback."""
    output = self._prime_invalid_part(False)
    self.assertThat(output, Not(Contains("Traceback")))
def test_updates_MAAS_ROOT_in_the_environment(self):
    """Inside the fixture MAAS_ROOT points at the fixture's path; it is
    restored (away from the skeleton) on exit."""
    def maas_root():
        # Re-read the environment each time; the fixture mutates it.
        return os.environ["MAAS_ROOT"]

    self.assertThat(maas_root(), Not(SamePath(self.skel)))
    with MAASRootFixture() as fixture:
        self.assertThat(maas_root(), SamePath(fixture.path))
    self.assertThat(maas_root(), Not(SamePath(self.skel)))
def __eq__(self, args):
    """Pseudo-equality hook used to match a colcon invocation.

    *args* is the argv list the code under test passed to the build
    call; this method asserts (via self.test) that the command line is
    well-formed for the configured scenario, then returns True to
    signal a match.
    """
    # Matching like this for order independence (otherwise it would be
    # quite fragile)
    command = " ".join(args)
    # Build type follows the 'debug' build attribute.
    if "debug" in self.test.build_attributes:
        self.test.assertThat(
            command, MatchesRegex(".*--cmake-args.*-DCMAKE_BUILD_TYPE=Debug")
        )
    else:
        self.test.assertThat(
            command, MatchesRegex(".*--cmake-args.*-DCMAKE_BUILD_TYPE=Release")
        )
    # Optional pass-through argument groups; each is only asserted when
    # the scenario configured it.
    if self.test.colcon_cmake_args:
        expected_args = " ".join(self.test.colcon_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(".*--cmake-args.*{}".format(re.escape(expected_args))),
        )
    if self.test.properties.disable_parallel:
        self.test.assertThat(command, MatchesRegex(".*--parallel-workers=1"))
    else:
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--parallel-workers={}".format(self.plugin.parallel_build_count)
            ),
        )
    if self.test.colcon_catkin_cmake_args:
        expected_args = " ".join(self.test.colcon_catkin_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--catkin-cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    if self.test.colcon_ament_cmake_args:
        expected_args = " ".join(self.test.colcon_ament_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--ament-cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    if self.test.colcon_packages_ignore:
        expected_args = " ".join(self.test.colcon_packages_ignore)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--packages-ignore.*{}".format(re.escape(expected_args))
            ),
        )
    if self.test.colcon_packages:
        self.test.assertThat(
            command,
            Contains(
                "--packages-select {}".format(" ".join(self.test.colcon_packages))
            ),
        )
    else:
        self.test.assertThat(command, Not(Contains("--packages-select")))
    # Invariant parts of every colcon build invocation.
    self.test.assertThat(args[0:2], Equals(["colcon", "build"]))
    self.test.assertThat(command, Contains("--merge-install"))
    self.test.assertThat(
        command, Contains("--build-base {}".format(self.plugin.builddir))
    )
    self.test.assertThat(
        command, Contains("--base-paths {}".format(self.plugin._ros_package_path))
    )
    self.test.assertThat(
        command, Contains("--install-base {}".format(self.plugin._ros_overlay))
    )
    return True
def test__excludes_ObjectMethod_descriptors_without_class_methods(self):
    """dir_class skips ObjectMethod descriptors lacking class methods."""
    class Example:
        attribute = ObjectMethod()

    names = list(dir_class(Example))
    self.assertThat(names, Not(Contains("attribute")))
def test_snap_from_snapcraft_init(self):
    """'snapcraft init' scaffolds a yaml that then snaps cleanly."""
    self.assertThat("snapcraft.yaml", Not(FileExists()))
    self.run_snapcraft("init")
    scaffolded_yaml = os.path.join("snap", "snapcraft.yaml")
    self.assertThat(scaffolded_yaml, FileExists())
    self.run_snapcraft("snap")
def test__excludes_Disabled_class_descriptors(self):
    """dir_class skips attributes wrapped in Disabled descriptors."""
    class Example:
        attribute = Disabled("foobar")

    names = list(dir_class(Example))
    self.assertThat(names, Not(Contains("attribute")))
def test_maps_and_sets_differ(self):
    """
    Mappings hash to different values than frozensets of their
    iteritems().
    """
    set_hash = generation_hash(frozenset([('a', 1), ('b', 2)]))
    map_hash = generation_hash(dict(a=1, b=2))
    self.assertThat(set_hash, Not(Equals(map_hash)))