def test_list(self):
    cluster_id = self.fake_nodegroup['cluster_id']
    with mock.patch.object(self.dbapi, 'list_cluster_nodegroups',
                           autospec=True) as mock_get_list:
        mock_get_list.return_value = [self.fake_nodegroup]
        nodegroups = objects.NodeGroup.list(self.context, cluster_id)
        self.assertEqual(1, mock_get_list.call_count)
        mock_get_list.assert_called_once_with(self.context, cluster_id,
                                              limit=None, marker=None,
                                              filters=None, sort_dir=None,
                                              sort_key=None)
        self.assertThat(nodegroups, HasLength(1))
        self.assertIsInstance(nodegroups[0], objects.NodeGroup)
        self.assertEqual(self.context, nodegroups[0]._context)
def test_list_with_filters(self, mock_cluster_template_get):
    with mock.patch.object(self.dbapi, 'get_cluster_list',
                           autospec=True) as mock_get_list:
        mock_get_list.return_value = [self.fake_cluster]
        mock_cluster_template_get.return_value = self.fake_cluster_template
        filters = {'name': 'cluster1'}
        clusters = objects.Cluster.list(self.context, filters=filters)
        mock_get_list.assert_called_once_with(self.context, sort_key=None,
                                              sort_dir=None, filters=filters,
                                              limit=None, marker=None)
        self.assertEqual(1, mock_get_list.call_count)
        self.assertThat(clusters, HasLength(1))
        self.assertIsInstance(clusters[0], objects.Cluster)
        self.assertEqual(self.context, clusters[0]._context)
def test_GET_returns_all_addresses_if_admin_and_all_specified(self):
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.USER_RESERVED, user=self.user)
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.USER_RESERVED, user=factory.make_User())
    response = self.client.get(reverse('ipaddresses_handler'), {"all": "1"})
    if self.user.is_superuser:
        self.assertEqual(
            http.client.OK, response.status_code, response.content)
        parsed_result = json_load_bytes(response.content)
        self.assertThat(parsed_result, HasLength(2))
    else:
        self.assertEqual(
            http.client.FORBIDDEN, response.status_code, response.content)
        self.assertThat(
            response.content.decode("utf-8"),
            Equals("Listing all IP addresses requires admin privileges."))
def test__limited_to_10_nodes_at_a_time_by_default(self):
    # Configure the rack controller subnet to be large enough.
    rack = factory.make_RackController(power_type='')
    rack_interface = rack.get_boot_interface()
    subnet = factory.make_Subnet(
        cidr=str(factory.make_ipv6_network(slash=8)))
    factory.make_StaticIPAddress(
        ip=factory.pick_ip_in_Subnet(subnet), subnet=subnet,
        interface=rack_interface)
    # Create at least 11 nodes connected to the rack.
    for _ in range(11):
        self.make_Node(bmc_connected_to=rack)
    # Only 10 nodes' power parameters are returned.
    self.assertThat(
        list_cluster_nodes_power_parameters(rack.system_id),
        HasLength(10))
def test_list_with_filters(self):
    with mock.patch.object(self.dbapi, 'list_volume_mappings',
                           autospec=True) as mock_get_list:
        mock_get_list.return_value = [self.fake_volume_mapping]
        filt = {'volume_provider': 'fake_provider'}
        volume_mappings = objects.VolumeMapping.list(self.context,
                                                     filters=filt)
        self.assertEqual(1, mock_get_list.call_count)
        self.assertThat(volume_mappings, HasLength(1))
        self.assertIsInstance(volume_mappings[0], objects.VolumeMapping)
        self.assertEqual(self.context, volume_mappings[0]._context)
        mock_get_list.assert_called_once_with(self.context, filters=filt,
                                              limit=None, marker=None,
                                              sort_key=None, sort_dir=None)
def test_get_build_properties(self):
    expected_build_properties = [
        "kernel-image-target",
        "kernel-with-firmware",
        "kernel-initrd-modules",
        "kernel-initrd-firmware",
        "kernel-device-trees",
        "kernel-initrd-compression",
    ]
    resulting_build_properties = kernel.KernelPlugin.get_build_properties()
    expected_build_properties.extend(
        snapcraft.plugins.kbuild.KBuildPlugin.get_build_properties())

    self.assertThat(resulting_build_properties,
                    HasLength(len(expected_build_properties)))

    for property in expected_build_properties:
        self.assertIn(property, resulting_build_properties)
def test_GET_query_with_nonexistent_id_returns_empty_list(self):
    # Trying to list events for a nonexistent node id returns a list
    # containing no events -- even if other (non-matching) nodes exist.
    node = factory.make_Node()
    make_events(node=node)
    existing_id = node.system_id
    nonexistent_id = existing_id + factory.make_string()
    response = self.client.get(reverse("events_handler"), {
        "op": "query",
        "id": [nonexistent_id],
    })
    self.assertThat(
        json_load_bytes(response.content),
        ContainsDict({
            "count": Equals(0),
            "events": HasLength(0),
        }),
    )
def test_list(self):
    with mock.patch.object(self.dbapi, 'get_volume_connector_list',
                           autospec=True) as mock_get_list:
        mock_get_list.return_value = [self.volume_connector_dict]
        volume_connectors = objects.VolumeConnector.list(
            self.context, limit=4, sort_key='uuid', sort_dir='asc')
        mock_get_list.assert_called_once_with(
            limit=4, marker=None, sort_key='uuid', sort_dir='asc')
        self.assertThat(volume_connectors, HasLength(1))
        self.assertIsInstance(volume_connectors[0],
                              objects.VolumeConnector)
        self.assertEqual(self.context, volume_connectors[0]._context)
def test_get_validation_sets_by_name(self):
    build_assertion = self.client.post_validation_sets_build_assertion(
        validation_sets=self.validation_sets_build
    )
    self.client.post_validation_sets(
        signed_validation_sets=self._fake_sign(build_assertion)
    )

    # Create a different build_assertion from the first one.
    build_assertion.name = "not-acme"
    self.client.post_validation_sets(
        signed_validation_sets=self._fake_sign(build_assertion)
    )

    vs = self.client.get_validation_sets(name="acme-cert-2020-10")
    self.assertThat(vs, IsInstance(validation_sets.ValidationSets))
    self.expectThat(vs.assertions, HasLength(1))
    self.expectThat(vs.assertions[0].name, Equals("acme-cert-2020-10"))
def test_basic_use_works(self):
    """
    After inserting a bunch of deployments into a ``GenerationTracker`` in
    sequence, the ``Diff`` returned from ``get_diff_from_hash_to_latest``
    can be applied to convert each of the deployments to the latest
    deployment.
    """
    deployments = list(related_deployments_strategy(5).example())
    # The diffing algorithm is potentially a little more interesting if
    # there are repeat deployments in the queue of deployments being
    # tracked.
    deployments[3] = deployments[1]
    tracker_under_test = GenerationTracker(10)
    # Populate the queue of deployment configurations in the tracker with
    # each of the generated deployments.
    for d in deployments:
        tracker_under_test.insert_latest(d)
    # The latest deployment is the last one added.
    last_deployment = deployments[-1]
    # Verify that we can compute the diff from each of the deployments to
    # the latest deployment.
    for d in deployments:
        computed_diffs = set()
        for _ in xrange(5):
            # In practice, we might insert the last deployment multiple
            # times. Verify that no matter how many times we insert it, we
            # still compute a valid diff.
            tracker_under_test.insert_latest(last_deployment)
            diff = tracker_under_test.get_diff_from_hash_to_latest(
                make_generation_hash(d))
            # Verify that the returned diff can be applied to the current
            # deployment to transform it into the latest deployment.
            self.assertThat(diff.apply(d), Equals(last_deployment))
            computed_diffs.add(diff)
        # Verify that all of the diffs that we computed were the same.
        self.assertThat(computed_diffs, HasLength(1))
def test_serialize_deserialize_snapshot(self, content1, content2, filename):
    """
    Create a local snapshot (and a child snapshot of it), serialize it to
    JSON, and verify that deserializing the JSON reconstructs an
    equivalent snapshot.
    """
    data1 = io.BytesIO(content1)
    snapshots = []
    d = create_snapshot(
        name=filename,
        author=self.alice,
        data_producer=data1,
        snapshot_stash_dir=self.stash_dir,
        parents=[],
    )
    d.addCallback(snapshots.append)
    self.assertThat(
        d,
        succeeded(Always()),
    )

    # now modify the same file and create a new local snapshot
    data2 = io.BytesIO(content2)
    d = create_snapshot(
        name=filename,
        author=self.alice,
        data_producer=data2,
        snapshot_stash_dir=self.stash_dir,
        parents=[snapshots[0]],
    )
    d.addCallback(snapshots.append)

    serialized = snapshots[1].to_json()
    reconstructed_local_snapshot = LocalSnapshot.from_json(
        serialized, self.alice)
    self.assertThat(
        reconstructed_local_snapshot,
        MatchesStructure(
            name=Equals(filename),
            parents_local=HasLength(1),
        ))
def test__can_watch_config(self):
    callback = lambda: None
    config_name = factory.make_name("config")
    manager = SignalsManager()
    manager.watch_config(callback, config_name)
    self.assertThat(manager._signals, HasLength(1))
    [signal] = manager._signals
    self.assertThat(
        signal.connect,
        MatchesPartialCall(
            Config.objects.config_changed_connect, config_name, callback),
    )
    self.assertThat(
        signal.disconnect,
        MatchesPartialCall(
            Config.objects.config_changed_disconnect, config_name, callback),
    )
def test_list_with_filters(self):
    with mock.patch.object(self.dbapi, 'get_x509keypair_list',
                           autospec=True) as mock_get_list:
        mock_get_list.return_value = [self.fake_x509keypair]
        filters = {'name': 'x509keypair1'}
        x509keypairs = objects.X509KeyPair.list(self.context,
                                                filters=filters)
        mock_get_list.assert_called_once_with(self.context, sort_key=None,
                                              sort_dir=None, filters=filters,
                                              limit=None, marker=None)
        self.assertEqual(1, mock_get_list.call_count)
        self.assertThat(x509keypairs, HasLength(1))
        self.assertIsInstance(x509keypairs[0], objects.X509KeyPair)
        self.assertEqual(self.context, x509keypairs[0]._context)
def succeeded_with_unblinded_tokens(all_token_count, returned_token_count):
    """
    :return: A matcher which matches a Deferred which fires with a response
        like the one returned by the **unblinded-tokens** endpoint.

    :param int all_token_count: The expected value in the ``total`` field
        of the response.

    :param int returned_token_count: The expected number of tokens in the
        ``unblinded-tokens`` field of the response.
    """
    return succeeded_with_unblinded_tokens_with_matcher(
        all_token_count,
        MatchesAll(
            HasLength(returned_token_count),
            AllMatch(IsInstance(unicode)),
        ),
        matches_lease_maintenance_spending(),
    )
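# A minimal usage sketch for the helper above (hypothetical; the client
# helper name and arguments are assumptions, not part of the suite): the
# returned matcher is applied to a Deferred that fires with the decoded
# JSON body of the **unblinded-tokens** endpoint.
#
#     def test_returns_requested_tokens(self):
#         d = self.client.get_unblinded_tokens(limit=10)  # hypothetical helper
#         self.assertThat(d, succeeded_with_unblinded_tokens(50, 10))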
def test_makeTest_makes_tests_from_test_function_with_scenarios(self):
    class SomeTests(MAASTestCase):
        scenarios = [("scn1", {"attr": 1}), ("scn2", {"attr": 2})]

        def test_a(self):
            """Example test method."""

        def test_b(self):
            """Example test method."""

    method = random.choice((SomeTests.test_a, SomeTests.test_b))
    tests = self.makeTest(Scenarios(), method, SomeTests)

    self.assertThat(tests, HasLength(2))
    self.assertThat(tests, AllMatch(IsInstance(SomeTests)))
    self.assertThat(
        {(test._testMethodName, test.attr) for test in tests},
        Equals({(method.__name__, 1), (method.__name__, 2)}),
    )
def test_updates_cache_if_event_type_not_found(self):
    protocol, connecting = self.patch_rpc_methods(
        side_effect=[succeed({}), fail(NoSuchEventType())])
    self.addCleanup((yield connecting))

    ip_address = factory.make_ip_address()
    description = factory.make_name("description")
    event_name = random.choice(list(map_enum(EVENT_TYPES)))
    event_hub = NodeEventHub()

    # Fine the first time.
    yield event_hub.logByIP(event_name, ip_address, description)
    # The cache has been populated with the event name.
    self.assertThat(event_hub._types_registered, Equals({event_name}))
    # Second time it crashes.
    with ExpectedException(NoSuchEventType):
        yield event_hub.logByIP(event_name, ip_address, description)
    # The event has been removed from the cache.
    self.assertThat(event_hub._types_registered, HasLength(0))
def test_mountinfo_by_root(self):
    mounts = mountinfo.MountInfo(mountinfo_file=self._write_mountinfo(
        dedent("""\
            23 28 0:4 / /proc rw,nosuid,nodev,noexec,relatime shared:14 - proc proc rw
            1341 28 7:6 / /snap/snapcraft/1 ro,nodev,relatime shared:39 - squashfs /dev/loop6 ro
            1455 28 253:0 /test-snap/prime /snap/test-snap/x1 ro,relatime shared:1 - ext4 /dev/mapper/foo rw,errors=remount-ro,data=ordered
            """)))  # noqa

    root_mounts = mounts.for_root('/')
    for mount_point in ('/proc', '/snap/snapcraft/1'):
        self.assertTrue(
            any(m for m in root_mounts if m.mount_point == mount_point),
            'Expected {!r} to be included in root mounts'.format(
                mount_point))

    test_snap_mounts = mounts.for_root('/test-snap/prime')
    self.assertThat(test_snap_mounts, HasLength(1))
    self.expectThat(test_snap_mounts[0].mount_point,
                    Equals('/snap/test-snap/x1'))
def test_request_success(self):
    """
    When a request is made, it is made to all marathon-lb instances and
    the responses are returned.
    """
    d = self.cleanup_d(self.client.request('GET', path='/my-path'))

    for lb in ['lb1', 'lb2']:
        request = yield self.requests.get()
        self.assertThat(request, HasRequestProperties(
            method='GET', url='http://%s:9090/my-path' % (lb,)))

        request.setResponseCode(200)
        request.finish()

    responses = yield d
    self.assertThat(responses, HasLength(2))
    for response in responses:
        self.assertThat(response.code, Equals(200))
def test__prints_event_json_in_verbose_mode(self):
    out = io.StringIO()
    input = (
        b"+;eth0;IPv4"
        b";HP\\032Color\\032LaserJet\\032CP2025dn\\032\\040test\\041;"
        b"_http._tcp;local\n")
    expected_result = {
        'event': 'BROWSER_NEW',
        'interface': 'eth0',
        'protocol': 'IPv4',
        'service_name': "HP Color LaserJet CP2025dn (test)",
        'type': '_http._tcp',
        'domain': 'local',
    }
    observe_mdns(verbose=True, input=[input], output=out)
    output = io.StringIO(out.getvalue())
    lines = output.readlines()
    self.assertThat(lines, HasLength(1))
    self.assertThat(json.loads(lines[0]), Equals(expected_result))
def test_yields_configuration_when_machine_install_kvm_true(self):
    node = factory.make_Node(
        status=NODE_STATUS.DEPLOYING, osystem="ubuntu", netboot=False
    )
    node.install_kvm = True
    configuration = get_vendor_data(node, None)
    config = str(dict(configuration))
    self.assertThat(config, Contains("virsh"))
    self.assertThat(config, Contains("ssh_pwauth"))
    self.assertThat(config, Contains("rbash"))
    self.assertThat(config, Contains("libvirt-qemu"))
    self.assertThat(config, Contains("ForceCommand"))
    self.assertThat(config, Contains("qemu-kvm"))
    self.assertThat(config, Contains("libvirt-bin"))
    # Check that a password was saved for the pod-to-be.
    virsh_password_meta = NodeMetadata.objects.filter(
        node=node, key="virsh_password"
    ).first()
    self.assertThat(virsh_password_meta.value, HasLength(32))
def test_request_partial_failure(self):
    """
    When a request is made and an error status code is returned from some
    (but not all) of the marathon-lb instances, the request returns the
    list of responses with a None value for the failed request.
    """
    d = self.cleanup_d(self.client.request('GET', path='/my-path'))

    lb1_request = yield self.requests.get()
    self.assertThat(lb1_request, HasRequestProperties(
        method='GET', url='http://lb1:9090/my-path'))

    lb2_request = yield self.requests.get()
    self.assertThat(lb2_request, HasRequestProperties(
        method='GET', url='http://lb2:9090/my-path'))

    # Fail the first one
    lb1_request.setResponseCode(500)
    lb1_request.setHeader('content-type', 'text/plain')
    lb1_request.write(b'Internal Server Error')
    lb1_request.finish()

    # ...but succeed the second
    lb2_request.setResponseCode(200)
    lb2_request.setHeader('content-type', 'text/plain')
    lb2_request.write(b'Yes, I work')
    lb2_request.finish()

    responses = yield d
    self.assertThat(responses, HasLength(2))
    lb1_response, lb2_response = responses

    self.assertThat(lb1_response, Is(None))
    self.assertThat(lb2_response, MatchesStructure(
        code=Equals(200),
        headers=HasHeader('content-type', ['text/plain'])
    ))

    lb2_response_content = yield lb2_response.content()
    self.assertThat(lb2_response_content, Equals(b'Yes, I work'))

    flush_logged_errors(HTTPError)
def test_updates_interfaces_in_database(self):
    region = yield deferToDatabase(factory.make_RegionController)
    region.owner = yield deferToDatabase(factory.make_admin)
    yield deferToDatabase(region.save)
    # Declare this region controller as the one running here.
    self.useFixture(MAASIDFixture(region.system_id))

    interfaces = {
        factory.make_name("eth"): {
            "type": "physical",
            "mac_address": factory.make_mac_address(),
            "parents": [],
            "links": [],
            "enabled": True,
        }
    }

    service = RegionNetworksMonitoringService(
        reactor, enable_beaconing=False)
    service.getInterfaces = lambda: succeed(interfaces)

    with FakeLogger("maas") as logger:
        service.startService()
        yield service.stopService()

    # Only the expected message about assuming responsibility was logged.
    self.assertThat(
        logger.output, DocTestMatches(
            "Networks monitoring service: "
            "Process ID ... assumed responsibility."))

    def get_interfaces():
        return list(region.interface_set.all())

    interfaces_observed = yield deferToDatabase(get_interfaces)
    self.assertThat(interfaces_observed, HasLength(1))

    interface_observed = interfaces_observed[0]
    self.assertThat(interface_observed, IsInstance(PhysicalInterface))
    self.assertThat(interfaces, Contains(interface_observed.name))

    interface_expected = interfaces[interface_observed.name]
    self.assertThat(
        interface_observed.mac_address.raw,
        Equals(interface_expected["mac_address"]))
def test__schedules_unlink(self):
    # We're going to capture the delayed call that
    # delete_large_object_content_later() creates.
    clock = self.patch(largefile_module, "reactor", Clock())

    with transaction.atomic():
        largefile = factory.make_LargeFile()
        oid = largefile.content.oid

    with post_commit_hooks:
        largefile.delete()

    # Deleting `largefile` resulted in a call being scheduled.
    delayed_calls = clock.getDelayedCalls()
    self.assertThat(delayed_calls, HasLength(1))
    [delayed_call] = delayed_calls

    # It is scheduled to be run on the next iteration of the reactor.
    self.assertFalse(delayed_call.called)
    self.assertThat(
        delayed_call,
        MatchesStructure(
            func=MatchesStructure.byEquality(__name__="unlink"),
            args=MatchesListwise([Is(largefile.content)]),
            kw=Equals({}),
            time=Equals(0),
        ),
    )

    # Call the delayed function ourselves instead of advancing `clock` so
    # that we can wait for it to complete (it returns a Deferred).
    func = wait_for(30)(delayed_call.func)  # Wait 30 seconds.
    func(*delayed_call.args, **delayed_call.kw)

    # The content has been removed from the database.
    with transaction.atomic():
        error = self.assertRaises(
            psycopg2.OperationalError, LargeObjectFile(oid).open, "rb"
        )
        self.assertDocTestMatches(
            "ERROR: large object ... does not exist", str(error)
        )
def test__logs_failures_from_cancelled_hooks(self):
    logger = self.useFixture(TwistedLoggerFixture())

    error = factory.make_exception()
    dhooks = DeferredHooks()
    d = Deferred()
    d.addBoth(lambda _: Failure(error))
    dhooks.add(d)
    dhooks.reset()

    self.assertThat(dhooks.hooks, HasLength(0))
    self.assertThat(d, IsFiredDeferred())
    self.assertDocTestMatches(
        dedent("""\
            Failure when cancelling hook.
            Traceback (most recent call last):
            ...
            maastesting.factory.TestException#...
        """),
        logger.output,
    )
def test__non_verbose_removes_redundant_events_and_outputs_summary(self):
    out = io.StringIO()
    input = (
        "=;eth0;IPv4"
        ";HP\\032Color\\032LaserJet\\032CP2025dn\\032\\040test\\041;"
        "_http._tcp;local;"
        "printer.local;"
        "192.168.0.222;"
        "80;"
        '"priority=50" "rp=RAW"\n')
    observe_mdns(verbose=False, input=[input, input], output=out)
    output = io.StringIO(out.getvalue())
    lines = output.readlines()
    self.assertThat(lines, HasLength(1))
    self.assertThat(
        json.loads(lines[0]), Equals({
            'interface': 'eth0',
            'address': '192.168.0.222',
            'hostname': 'printer',
        }))
def test_list_by_node_id(self):
    with mock.patch.object(self.dbapi, 'get_volume_connectors_by_node_id',
                           autospec=True) as mock_get_list_by_node_id:
        mock_get_list_by_node_id.return_value = [
            self.volume_connector_dict
        ]
        node_id = self.volume_connector_dict['node_id']
        volume_connectors = objects.VolumeConnector.list_by_node_id(
            self.context, node_id, limit=10, sort_dir='desc')
        mock_get_list_by_node_id.assert_called_once_with(
            node_id, limit=10, marker=None, sort_key=None, sort_dir='desc')
        self.assertThat(volume_connectors, HasLength(1))
        self.assertIsInstance(volume_connectors[0],
                              objects.VolumeConnector)
        self.assertEqual(self.context, volume_connectors[0]._context)
def test_GET_with_all_for_admin_returns_non_user_reserved_types(self):
    factory.make_StaticIPAddress(alloc_type=IPADDRESS_TYPE.STICKY)
    factory.make_StaticIPAddress(alloc_type=IPADDRESS_TYPE.USER_RESERVED)
    response = self.client.get(
        reverse("ipaddresses_handler"), {"all": "true"}
    )
    if self.user.is_superuser:
        self.assertEqual(
            http.client.OK, response.status_code, response.content
        )
        parsed_result = json_load_bytes(response.content)
        self.assertThat(parsed_result, HasLength(2))
    else:
        self.assertEqual(
            http.client.FORBIDDEN, response.status_code, response.content
        )
        self.assertThat(
            response.content.decode("utf-8"),
            Equals("Listing all IP addresses requires admin privileges."),
        )
class TestWarningsMatcherNoWarningsInterface(TestCase, TestMatchersInterface):
    """
    Tests for `testtools.matchers._warnings.Warnings`.

    Specifically with the optional matcher argument matching that there
    were no warnings.
    """

    matches_matcher = Warnings(warnings_matcher=HasLength(0))

    def nowarning_func():
        pass

    def warning_func():
        warnings.warn('warning_func is deprecated', DeprecationWarning, 2)

    matches_matches = [nowarning_func]
    matches_mismatches = [warning_func]

    str_examples = []
    describe_examples = []
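# A minimal sketch of using the matcher under test directly in an assertion
# (assuming the same Warnings/HasLength imports as the class above): a
# callable that triggers no warnings when invoked satisfies
# Warnings(warnings_matcher=HasLength(0)), while one that calls
# warnings.warn() does not.
#
#     self.assertThat(
#         lambda: None,
#         Warnings(warnings_matcher=HasLength(0)))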
def test__prints_event_json_in_verbose_mode(self):
    out = io.StringIO()
    input = (
        b"+;eth0;IPv4"
        b";HP\\032Color\\032LaserJet\\032CP2025dn\\032\\040test\\041;"
        b"_http._tcp;local\n"
    )
    expected_result = {
        "event": "BROWSER_NEW",
        "interface": "eth0",
        "protocol": "IPv4",
        "service_name": "HP Color LaserJet CP2025dn (test)",
        "type": "_http._tcp",
        "domain": "local",
    }
    observe_mdns(verbose=True, input=[input], output=out)
    output = io.StringIO(out.getvalue())
    lines = output.readlines()
    self.assertThat(lines, HasLength(1))
    self.assertThat(json.loads(lines[0]), Equals(expected_result))
def test__logs_failures_from_cancellers_when_hook_already_fired(self):
    logger = self.useFixture(TwistedLoggerFixture())

    def canceller(d):
        d.callback(None)
        raise factory.make_exception()

    dhooks = DeferredHooks()
    d = Deferred(canceller)
    dhooks.add(d)
    dhooks.reset()

    self.assertThat(dhooks.hooks, HasLength(0))
    self.assertThat(d, IsFiredDeferred())
    self.assertDocTestMatches(
        dedent("""\
            Failure when cancelling hook.
            Traceback (most recent call last):
            ...
            maastesting.factory.TestException#...
        """),
        logger.output)