def test__tags_create(self):
    """Creating a tag calls the handler once and yields a `Tag` object."""
    origin = make_origin()
    tag_name = make_string_without_spaces()
    tag_comment = make_string_without_spaces()
    handler = origin.Tags._handler
    handler.create.return_value = {
        "name": tag_name,
        "comment": tag_comment,
    }
    tag = origin.Tags.create(name=tag_name, comment=tag_comment)
    handler.create.assert_called_once_with(
        name=tag_name, comment=tag_comment)
    self.assertThat(tag, IsInstance(origin.Tag))
    self.assertThat(
        tag,
        MatchesStructure.byEquality(name=tag_name, comment=tag_comment))
def test_makeTest_makes_tests_from_test_function_with_scenarios(self):
    """A single test method with scenarios expands into one test per scenario."""

    class SomeTests(MAASTestCase):
        scenarios = [("scn1", {"attr": 1}), ("scn2", {"attr": 2})]

        def test_a(self):
            """Example test method."""

        def test_b(self):
            """Example test method."""

    # Either method should expand identically; pick one at random.
    method = random.choice((SomeTests.test_a, SomeTests.test_b))
    tests = self.makeTest(Scenarios(), method, SomeTests)
    # Two scenarios -> two tests, all instances of the declaring case.
    self.assertThat(tests, HasLength(2))
    self.assertThat(tests, AllMatch(IsInstance(SomeTests)))
    # Each expanded test keeps the method name but gets a scenario attr.
    self.assertThat(
        {(test._testMethodName, test.attr) for test in tests},
        Equals({(method.__name__, 1), (method.__name__, 2)}),
    )
def succeeded_with_unblinded_tokens(all_token_count, returned_token_count):
    """
    :return: A matcher which matches a Deferred which fires with a response
        like the one returned by the **unblinded-tokens** endpoint.

    :param int all_token_count: The expected value in the ``total`` field of
        the response.

    :param int returned_token_count: The expected number of tokens in the
        ``unblinded-tokens`` field of the response.
    """
    # NOTE: ``unicode`` implies this module targets Python 2 (or defines a
    # compatibility alias) — each token must be a text string.
    return succeeded_with_unblinded_tokens_with_matcher(
        all_token_count,
        MatchesAll(
            HasLength(returned_token_count),
            AllMatch(IsInstance(unicode)),
        ),
        matches_lease_maintenance_spending(),
    )
def failed(reason):
    # Errback: the Deferred must fail with a KubernetesError carrying a
    # NOT_FOUND status object from the server.
    self.assertThat(reason, IsInstance(Failure))
    reason.trap(KubernetesError)
    self.assertThat(
        reason.value,
        MatchesStructure(
            code=Equals(NOT_FOUND),
            status=Equals(
                v1.Status(
                    metadata={},
                    status=u"Failure",
                    message=
                    u"the server could not find the requested resource",
                    reason=u"NotFound",
                    details=dict(),
                    code=NOT_FOUND,
                )),
        ),
    )
def test_get_multiple_validation_sets(self):
    """Fetching validation sets returns the assertions from every push."""
    build_assertion = self.client.post_validation_sets_build_assertion(
        validation_sets=self.validation_sets_build
    )
    vs_1 = self.client.post_validation_sets(
        signed_validation_sets=self._fake_sign(build_assertion)
    )
    # Create a different build_assertion from the first one.
    build_assertion.name = "not-acme"
    vs_2 = self.client.post_validation_sets(
        signed_validation_sets=self._fake_sign(build_assertion)
    )
    vs = self.client.get_validation_sets()
    self.assertThat(vs, IsInstance(validation_sets.ValidationSets))
    # Both previously-posted assertions must be present in the listing.
    self.expectThat(vs.assertions, Contains(vs_1.assertions[0]))
    self.expectThat(vs.assertions, Contains(vs_2.assertions[0]))
def test_enough_shares(self):
    """
    If enough shares are found to reassemble the data then
    ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires
    with share and share placement information.
    """
    storage_index = b"a" * 16
    # Two servers, one share each; needed_shares == total_shares == 2.
    serverids = list(ch * 20 for ch in [b"b", b"c"])
    storages = list(
        FoolscapStorageServer(StorageServer(self.mktemp(), serverid))
        for serverid in serverids)
    rrefs_with_ueb = list(
        LocalWrapper(storage, fireNow)
        for storage in storages)
    ueb = {
        "needed_shares": len(serverids),
        "total_shares": len(serverids),
        "segment_size": 128 * 1024,
        "size": 1024,
    }
    # Write share number n to the n-th server so every share is placed.
    for n, rref_with_ueb in enumerate(rrefs_with_ueb):
        yield write_good_share(rref_with_ueb, storage_index, ueb, [n])
    servers_with_ueb = list(
        NoNetworkServer(serverid, rref_with_ueb)
        for (serverid, rref_with_ueb) in zip(serverids, rrefs_with_ueb))
    peers = {storage_index: servers_with_ueb}
    caf = offloaded.CHKCheckerAndUEBFetcher(
        peers.get,
        storage_index,
        None,
    )
    # check() fires with (share placement map, UEB data, UEB hash bytes).
    self.assertThat(
        caf.check(),
        succeeded(
            MatchesListwise([
                Equals({
                    n: {serverid}
                    for (n, serverid) in enumerate(serverids)
                }),
                Equals(ueb),
                IsInstance(bytes),
            ])),
    )
def test_create_with_values(self):
    """Saving a DNSPublication stores the given fields, except `created`."""
    serial = randint(1, 5000)
    created = datetime.now() - timedelta(minutes=1098)
    source = factory.make_name("source")
    pub = DNSPublication(serial=serial, created=created, source=source)
    pub.save()
    self.assertThat(
        pub,
        MatchesStructure(
            serial=Equals(serial),
            created=MatchesAll(
                IsInstance(datetime),
                # `created` is always set; given values are ignored.
                Not(Equals(created)),
                first_only=True,
            ),
            source=Equals(source),
        ),
    )
def test_positive_integer_price(self, pass_value, parameters, file_sizes):
    """
    The result of ``PriceCalculator.calculate`` for a non-empty size list
    is always a positive integer.
    """
    k, happy, N = parameters
    calculator = PriceCalculator(
        pass_value=pass_value,
        shares_needed=k,
        shares_total=N,
    )
    price = calculator.calculate(file_sizes)
    # ``(int, long)`` implies Python 2 compatibility — either integer
    # type is acceptable, but the price must be strictly positive.
    self.assertThat(
        price,
        MatchesAll(
            IsInstance((int, long)),
            GreaterThan(0),
        ),
    )
def test_configures_thread_pool(self):
    """makeService installs a proper ThreadPool on the reactor."""
    # Patch and restore where it's visible because patching a running
    # reactor is potentially fairly harmful.
    patcher = monkey.MonkeyPatcher()
    patcher.add_patch(reactor, "threadpool", None)
    patcher.add_patch(reactor, "threadpoolForDatabase", None)
    patcher.patch()
    try:
        service_maker = RegionServiceMaker("Harry", "Hill")
        # Disable _ensureConnection(); it's not allowed in the reactor.
        self.patch_autospec(service_maker, "_ensureConnection")
        # Disable _performStartUp() as it performs operations we don't want
        # in the testing environment.
        self.patch_autospec(service_maker, "_performStartUp")
        service_maker.makeService(Options())
        threadpool = reactor.getThreadPool()
        self.assertThat(threadpool, IsInstance(ThreadPool))
    finally:
        # Always restore the reactor's original thread pools.
        patcher.restore()
def test_missing_zone(self, token, subdomain, zone_name):
    """
    `.ZoneNotFound` is raised if the configured zone cannot be found at
    the configured provider.
    """
    challenge = self._challenge_factory(token=token)
    response = challenge.response(RSA_KEY_512)
    responder = self._responder_factory(zone_name=zone_name)
    server_name = u'{}.{}'.format(subdomain, zone_name)
    # Remove every zone so the configured one cannot be found.
    for zone in responder._driver.list_zones():
        zone.delete()
    d = maybeDeferred(responder.start_responding, server_name, challenge,
                      response)
    self._perform()
    self.assertThat(
        d,
        failed_with(
            MatchesAll(IsInstance(ZoneNotFound),
                       MatchesStructure(zone_name=Equals(zone_name)))))
def test_regionRefresh_refreshes_a_region(self):
    """A failed region refresh is suppressed but logged."""
    region = factory.make_RegionController()
    self.patch(region, "refresh").return_value = Deferred()
    d = start_up.refreshRegion(region)
    self.assertThat(d, IsInstance(Deferred))
    exception = factory.make_exception_type()
    with TwistedLoggerFixture() as logger:
        d.errback(exception("boom"))
    # The exception is suppressed ...
    self.assertThat(extract_result(d), Is(None))
    # ... but it has been logged.
    self.assertThat(
        logger.output,
        DocTestMatches("""
        Failure when refreshing region.
        Traceback (most recent call last):...
        Failure: maastesting.factory.TestException#...: boom
        """),
    )
def test_wrong_zone(self, token, subdomain, zone_name):
    """
    Trying to respond for a domain not in the configured zone results in
    a `.NotInZone` exception.
    """
    challenge = self._challenge_factory(token=token)
    response = challenge.response(RSA_KEY_512)
    responder = self._responder_factory(zone_name=zone_name)
    # ".junk" suffix pushes the name outside the configured zone.
    server_name = u'{}.{}.junk'.format(subdomain, zone_name)
    d = maybeDeferred(responder.start_responding, server_name, challenge,
                      response)
    self._perform()
    # NOTE(review): EndsWith implies the exception's server_name carries a
    # prefix (e.g. a challenge label) before the requested name — confirm
    # against the responder implementation.
    self.assertThat(
        d,
        failed_with(
            MatchesAll(
                IsInstance(NotInZone),
                MatchesStructure(server_name=EndsWith(u'.' + server_name),
                                 zone_name=Equals(zone_name)))))
def test_includes_diff_of_mismatch_multiple_lines(self):
    """A multi-line mismatch attaches a readable ndiff as a detail."""
    expected = "A line of text that differs\nat the end of the 2nd line."
    observed = "A line of text that differs\nat the end of the 2ND line."
    mismatch = TextEquals(expected).match(observed)
    details = mismatch.get_details()
    self.assertThat(details, ContainsDict({"diff": IsInstance(Content)}))
    # The diff detail uses difflib's ndiff style: unchanged lines keep a
    # two-space prefix; "?" guide lines point at the changed characters.
    self.assertThat(
        details["diff"].as_text(),
        Equals(
            dedent("""\
            --- expected
            +++ observed
              A line of text that differs
            - at the end of the 2nd line.
            ?                    ^^
            + at the end of the 2ND line.
            ?                    ^^
            """)),
    )
def test_make_IPCMasterService(self):
    """`make_IPCMasterService` builds the service and is registered
    correctly in the RegionEventLoop factory table."""
    service = eventloop.make_IPCMasterService()
    self.assertThat(service, IsInstance(ipc.IPCMasterService))
    # It is registered as a factory in RegionEventLoop.
    self.assertIs(
        eventloop.make_IPCMasterService,
        eventloop.loop.factories["ipc-master"]["factory"],
    )
    # Has no required dependencies.
    # Note: assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(
        [], eventloop.loop.factories["ipc-master"]["requires"]
    )
    # Has an optional dependency on workers.
    self.assertEqual(
        ["workers"], eventloop.loop.factories["ipc-master"]["optional"]
    )
    # Only runs on the master process.
    self.assertTrue(
        eventloop.loop.factories["ipc-master"]["only_on_master"]
    )
def test_expands_dict_fields(self):
    """Dict fields are expanded into their defaulted sub-field values."""
    field_name = factory.make_name('field_name')
    sub_fields = {
        factory.make_name('sub_field'): CharField() for _ in range(3)
    }
    fields = {field_name: DictCharField(sub_fields)}
    # Defaults keyed "<field>_<subfield>" — the flattened naming scheme.
    defaults = {
        "%s_%s" % (field_name, field): factory.make_name('subfield')
        for field in sub_fields.keys()
    }
    data = {field_name: DictCharField(fields)}
    results = get_overridden_query_dict(defaults, data, fields)
    # Every default must come through, plus the dict field itself.
    expected = {key: Equals(value) for key, value in defaults.items()}
    expected.update({
        name: IsInstance(value.__class__)
        for name, value in fields.items()
    })
    self.assertThat(results, MatchesDict(expected))
def test__create_bridge_with_objects(self):
    """Creating a bridge with object arguments passes their IDs through."""
    origin = make_origin()
    Interfaces, Interface = origin.Interfaces, origin.Interface
    system_id = make_string_without_spaces()
    node = origin.Node(system_id)
    name = make_string_without_spaces()
    mac_address = "00:11:22:33:44:55"
    tags = [
        make_string_without_spaces()
        for _ in range(3)
    ]
    mtu = random.randint(1500, 3000)
    vlan_id = random.randint(1, 10)
    vlan = origin.Vlan({
        'fabric_id': random.randint(1, 20),
        'id': vlan_id,
    })
    accept_ra = random.choice([True, False])
    autoconf = random.choice([True, False])
    parent_id = random.randint(1, 10)
    parent_obj = Interface((system_id, parent_id))
    bridge_stp = random.choice([True, False])
    bridge_fd = random.randint(1, 10)
    interface_data = {
        'system_id': system_id,
        'id': random.randint(1, 20),
        'type': InterfaceType.VLAN.value,
        'name': name,
        'tags': tags,
    }
    Interfaces._handler.create_bridge.return_value = interface_data
    # Pass rich objects (node, vlan, parent interface) to create().
    nic = Interfaces.create(
        node=node, interface_type=InterfaceType.BRIDGE,
        parent=parent_obj, name=name, tags=tags, mtu=mtu, vlan=vlan,
        accept_ra=accept_ra, autoconf=autoconf, mac_address=mac_address,
        bridge_stp=bridge_stp, bridge_fd=bridge_fd)
    self.assertThat(nic, IsInstance(Interface))
    # The handler must be called with scalar IDs, not the objects.
    Interfaces._handler.create_bridge.assert_called_once_with(
        system_id=system_id, parent=parent_id, name=name, tags=tags,
        mtu=mtu, vlan=vlan_id, accept_ra=accept_ra, autoconf=autoconf,
        mac_address=mac_address, bridge_stp=bridge_stp,
        bridge_fd=bridge_fd)
def test_updates_interfaces_in_database(self):
    """Interfaces reported by the monitoring service are persisted."""
    region = yield deferToDatabase(factory.make_RegionController)
    region.owner = yield deferToDatabase(factory.make_admin)
    yield deferToDatabase(region.save)
    # Declare this region controller as the one running here.
    self.useFixture(MAASIDFixture(region.system_id))
    interfaces = {
        factory.make_name("eth"): {
            "type": "physical",
            "mac_address": factory.make_mac_address(),
            "parents": [],
            "links": [],
            "enabled": True,
        }
    }
    service = RegionNetworksMonitoringService(reactor,
                                              enable_beaconing=False)
    service.getInterfaces = lambda: succeed(interfaces)
    with FakeLogger("maas") as logger:
        service.startService()
        yield service.stopService()
    # Nothing was logged.
    self.assertThat(
        logger.output,
        DocTestMatches("Networks monitoring service: "
                       "Process ID ... assumed responsibility."))

    def get_interfaces():
        return list(region.interface_set.all())

    interfaces_observed = yield deferToDatabase(get_interfaces)
    # Exactly one physical interface, matching the reported definition.
    self.assertThat(interfaces_observed, HasLength(1))
    interface_observed = interfaces_observed[0]
    self.assertThat(interface_observed, IsInstance(PhysicalInterface))
    self.assertThat(interfaces, Contains(interface_observed.name))
    interface_expected = interfaces[interface_observed.name]
    self.assertThat(interface_observed.mac_address.raw,
                    Equals(interface_expected["mac_address"]))
def test__create(self):
    """Creating a logical volume creates, re-reads, and tags the device."""
    origin = make_origin()
    system_id = make_name_without_spaces("system-id")
    lv_id = random.randint(1, 20)
    lv_name = make_name_without_spaces("lvname")
    VolumeGroup = origin.VolumeGroup
    vg = VolumeGroup({
        "system_id": system_id,
        "id": random.randint(21, 30),
    })
    VolumeGroup._handler.create_logical_volume.return_value = {
        "system_id": system_id,
        "id": lv_id,
    }
    uuid = make_name_without_spaces("uuid")
    tags = [make_name_without_spaces("tag")]
    BlockDevice = origin.BlockDevice
    # The freshly-created LV is read back as a block device.
    BlockDevice._handler.read.return_value = {
        "system_id": system_id,
        "id": lv_id,
        "name": lv_name,
        "tags": [],
        "uuid": uuid,
    }
    LogicalVolumes = origin.LogicalVolumes
    observed = LogicalVolumes.create(
        vg, lv_name, 10 * 1024, uuid=uuid, tags=tags)
    self.assertThat(observed, IsInstance(logical_volumes.LogicalVolume))
    self.assertThat(observed.name, Equals(lv_name))
    VolumeGroup._handler.create_logical_volume.assert_called_once_with(
        system_id=system_id, id=vg.id, name=lv_name, size=10 * 1024,
        uuid=uuid)
    BlockDevice._handler.read.assert_called_once_with(
        system_id=system_id, id=lv_id)
    # Tags are applied one at a time after creation.
    BlockDevice._handler.add_tag.assert_called_once_with(
        system_id=system_id, id=lv_id, tag=tags[0])
def test__interface_links_create_LINK_UP(self):
    """Creating a LINK_UP link calls link_subnet and refreshes the links."""
    origin = make_origin()
    Interface, Subnet = origin.Interface, origin.Subnet
    system_id = make_string_without_spaces()
    interface_data = {
        "system_id": system_id,
        "id": random.randint(0, 100),
        "name": make_string_without_spaces(),
        "type": InterfaceType.PHYSICAL.value,
        "links": [],
    }
    interface = Interface(interface_data)
    # Handler responds with the interface carrying the new link.
    updated_data = dict(interface_data)
    link_id = random.randint(0, 100)
    subnet_id = random.randint(1, 100)
    updated_data["links"] = [{
        "id": link_id,
        "mode": LinkMode.LINK_UP.value,
        "subnet": {
            "id": subnet_id
        }
    }]
    Interface._handler.link_subnet.return_value = updated_data
    interface.links.create(LinkMode.LINK_UP, subnet=Subnet(subnet_id))
    Interface._handler.link_subnet.assert_called_once_with(
        system_id=interface.node.system_id,
        id=interface.id,
        mode=LinkMode.LINK_UP.value,
        subnet=subnet_id,
        default_gateway=False,
        force=False,
    )
    # The interface's links collection reflects the handler's response.
    self.assertThat(
        interface.links,
        MatchesSetwise(
            MatchesStructure(
                id=Equals(link_id),
                mode=Equals(LinkMode.LINK_UP),
                subnet=MatchesAll(IsInstance(Subnet),
                                  MatchesStructure(id=Equals(subnet_id))),
            )),
    )
def test__create_physical_with_objects(self):
    """Creating a physical interface with objects passes scalar IDs."""
    origin = make_origin()
    Interfaces, Interface = origin.Interfaces, origin.Interface
    system_id = make_string_without_spaces()
    node = origin.Node(system_id)
    mac_address = "00:11:22:33:44:55"
    name = make_string_without_spaces()
    tags = [make_string_without_spaces() for _ in range(3)]
    mtu = random.randint(1500, 3000)
    vlan_id = random.randint(1, 20)
    vlan = origin.Vlan({"fabric_id": random.randint(1, 20), "id": vlan_id})
    accept_ra = random.choice([True, False])
    autoconf = random.choice([True, False])
    interface_data = {
        "system_id": system_id,
        "id": random.randint(1, 20),
        "type": InterfaceType.PHYSICAL.value,
        "name": name,
        "tags": tags,
    }
    Interfaces._handler.create_physical.return_value = interface_data
    # Pass rich objects (node, vlan) to create().
    nic = Interfaces.create(
        node=node,
        mac_address=mac_address,
        name=name,
        tags=tags,
        mtu=mtu,
        vlan=vlan,
        accept_ra=accept_ra,
        autoconf=autoconf,
    )
    self.assertThat(nic, IsInstance(Interface))
    # The handler must receive IDs, not the objects themselves.
    Interfaces._handler.create_physical.assert_called_once_with(
        system_id=system_id,
        mac_address=mac_address,
        name=name,
        tags=tags,
        mtu=mtu,
        vlan=vlan_id,
        accept_ra=accept_ra,
        autoconf=autoconf,
    )
def test_getConfiguration_returns_configuration_object(self):
    """The proxy's configuration reflects the region's RPC settings."""
    is_region, is_rack = factory.pick_bool(), factory.pick_bool()
    allowed_cidrs = self.make_cidrs()
    proxy_enabled = factory.pick_bool()
    proxy_prefer_v4_proxy = factory.pick_bool()
    proxy_port = random.randint(1000, 8000)
    rpc_service, protocol = yield prepareRegion(
        self,
        is_region=is_region,
        is_rack=is_rack,
        proxy_enabled=proxy_enabled,
        proxy_allowed_cidrs=allowed_cidrs,
        proxy_port=proxy_port,
        proxy_prefer_v4_proxy=proxy_prefer_v4_proxy,
    )
    region_ips = self.extract_regions(rpc_service)
    service, proxy = self.make_RackProxy_ExternalService(
        rpc_service, reactor
    )
    yield service.startService()
    # NOTE(review): yielding the bound method returns it unchanged under
    # inlineCallbacks, so this registers stopService as a cleanup.
    self.addCleanup((yield service.stopService))
    config = yield service._getConfiguration()
    observed = proxy._getConfiguration(
        config.controller_type,
        config.proxy_configuration,
        config.connections,
    )
    self.assertThat(observed, IsInstance(external._ProxyConfiguration))
    self.assertThat(
        observed,
        MatchesStructure.byEquality(
            enabled=proxy_enabled,
            port=proxy_port,
            allowed_cidrs=allowed_cidrs,
            prefer_v4_proxy=proxy_prefer_v4_proxy,
            upstream_proxies=region_ips,
            is_region=is_region,
            is_rack=is_rack,
        ),
    )
def test_not_json_error(self):
    """
    Documint errors that have a JSON content type but do not contain
    valid JSON raise `MalformedDocumintError`.
    """
    def _response_for(method, url, params, headers, data):
        # Claims JSON but the body is not parseable as JSON.
        return (400,
                {b'Content-Type': b'application/json'},
                b'hello world')

    resource = StringStubbingResource(_response_for)
    treq = StubTreq(resource)
    request = documint_request_factory(treq.request)
    self.assertThat(
        request(b'GET', b'http://example.com/not_json_error'),
        failed(
            AfterPreprocessing(
                lambda f: f.value,
                MatchesAll(
                    IsInstance(MalformedDocumintError),
                    MatchesStructure(data=Equals(b'hello world'))))))
def test__zones_create(self):
    """Creating a zone calls the handler once and yields a `Zone`."""
    origin = make_origin()
    zone_id = random.randint(0, 100)
    zone_name = make_string_without_spaces()
    zone_description = make_string_without_spaces()
    handler = origin.Zones._handler
    handler.create.return_value = {
        "id": zone_id,
        "name": zone_name,
        "description": zone_description,
    }
    zone = origin.Zones.create(
        name=zone_name, description=zone_description)
    handler.create.assert_called_once_with(
        name=zone_name, description=zone_description)
    self.assertThat(zone, IsInstance(origin.Zone))
    self.assertThat(
        zone,
        MatchesStructure.byEquality(
            id=zone_id, name=zone_name, description=zone_description),
    )
def test_make_WebApplicationService(self):
    """`make_WebApplicationService` builds the web service on the
    default IPv6 endpoint and registers it in RegionEventLoop."""
    service = eventloop.make_WebApplicationService(
        FakePostgresListenerService(), sentinel.status_worker)
    self.assertThat(service, IsInstance(webapp.WebApplicationService))
    # The endpoint is set to port 5243 on localhost.
    self.assertThat(
        service.endpoint,
        MatchesStructure.byEquality(reactor=reactor,
                                    addressFamily=socket.AF_INET6))
    self.assertThat(service.endpoint.port, Equals(DEFAULT_PORT))
    # IPv6 address is: (host, port, flowinfo, scopeid)
    self.assertThat(service.endpoint.socket.getsockname(),
                    Equals(("::", DEFAULT_PORT, 0, 0)))
    # It is registered as a factory in RegionEventLoop.
    self.assertIs(eventloop.make_WebApplicationService,
                  eventloop.loop.factories["web"]["factory"])
    # Has a dependency of postgres-listener and status-worker.
    # Note: assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(["postgres-listener-worker", "status-worker"],
                     eventloop.loop.factories["web"]["requires"])
    # Runs on workers, not only on the master process.
    self.assertFalse(eventloop.loop.factories["web"]["only_on_master"])
def test_makeTest_makes_tests_from_test_case_class_with_scenarios(self):
    """Expanding a whole case class yields every method × scenario pair."""

    class SomeTests(MAASTestCase):
        scenarios = [("scn1", {"attr": 1}), ("scn2", {"attr": 2})]

        def test_a(self):
            pass

        def test_b(self):
            pass

    tests = self.makeTest(Scenarios(), SomeTests, self)
    # Two methods × two scenarios = four tests.
    self.assertThat(tests, HasLength(4))
    self.assertThat(tests, AllMatch(IsInstance(SomeTests)))
    self.assertThat(
        {(test._testMethodName, test.attr) for test in tests},
        Equals(
            {("test_a", 1), ("test_a", 2), ("test_b", 1), ("test_b", 2)}
        ),
    )
def test_non_existant(self):
    """
    :py:`get_pathinfo` returns a :py:`PathInfo` when given a path that
    does not exist.
    """
    # mktemp() only reserves a name; nothing is created on disk.
    path = FilePath(self.mktemp())
    path_info = get_pathinfo(path)
    self.assertThat(
        path_info,
        MatchesAll(
            IsInstance(PathInfo),
            MatchesStructure.byEquality(
                is_dir=False,
                is_file=False,
                is_link=False,
                exists=False,
                state=None,
            ),
        ),
    )
def test_fifo(self):
    """
    :py:`get_pathinfo` returns a :py:`PathInfo` when given a named pipe.
    """
    path = FilePath(self.mktemp())
    # A FIFO exists but is neither a regular file nor a directory.
    os.mkfifo(path.path)
    path_info = get_pathinfo(path)
    self.assertThat(
        path_info,
        MatchesAll(
            IsInstance(PathInfo),
            MatchesStructure.byEquality(
                is_dir=False,
                is_file=False,
                is_link=False,
                exists=True,
                state=None,
            ),
        ),
    )
def test_make_RackControllerService(self):
    """`make_RackControllerService` builds the service and is registered
    correctly in the RegionEventLoop factory table."""
    service = eventloop.make_RackControllerService(
        FakePostgresListenerService(), sentinel.rpc_advertise
    )
    self.assertThat(
        service, IsInstance(rack_controller.RackControllerService)
    )
    # It is registered as a factory in RegionEventLoop.
    self.assertIs(
        eventloop.make_RackControllerService,
        eventloop.loop.factories["rack-controller"]["factory"],
    )
    # Has a dependency of ipc-worker and postgres-listener.
    # Note: assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(
        ["ipc-worker", "postgres-listener-worker"],
        eventloop.loop.factories["rack-controller"]["requires"],
    )
    # Runs on workers, not only on the master process.
    self.assertFalse(
        eventloop.loop.factories["rack-controller"]["only_on_master"]
    )
def test__getConfiguration_returns_configuration_object(self):
    """The NTP configuration mirrors the region's servers and peers."""
    is_region, is_rack = factory.pick_bool(), factory.pick_bool()
    servers, peers = self.make_servers_and_peers()
    rpc_service, protocol = yield prepareRegion(self,
                                                is_region=is_region,
                                                is_rack=is_rack,
                                                servers=servers,
                                                peers=peers)
    service, ntp = self.make_RackNTP_ExternalService(rpc_service, reactor)
    config = yield service._getConfiguration()
    observed = ntp._getConfiguration(config.controller_type,
                                     config.time_configuration)
    self.assertThat(observed, IsInstance(external._NTPConfiguration))
    self.assertThat(
        observed,
        MatchesStructure.byEquality(references=servers,
                                    peers=peers,
                                    is_region=is_region,
                                    is_rack=is_rack))
def test__tags(self):
    """`Node.tags` yields one `Tag` per name in `tag_names`, in order."""
    origin = make_origin()
    tag_names = [make_name_without_spaces("tag") for _ in range(3)]
    system_id = make_name_without_spaces("system-id")
    node = origin.Node({
        "id": 1,
        "system_id": system_id,
        "hostname": make_name_without_spaces("hostname"),
        "tag_names": tag_names,
    })
    self.assertThat(
        node.tags,
        MatchesListwise([
            MatchesAll(IsInstance(origin.Tag),
                       MatchesStructure(name=Equals(tag_name)))
            for tag_name in tag_names
        ]))