def test__Version_read(self):
    """The bound ``Version.read`` action is described as a RESTful GET."""
    session = bones.SessionAPI(self.description)
    action = session.Version.read
    self.assertThat(action, MatchesStructure.byEquality(
        name="read",
        fullname="Version.read",
        method="GET",
        handler=session.Version,
        is_restful=True,
        op=None,
    ))
def test_updateWorkItems_merges_with_existing_ones(self):
    """New work items are interleaved with existing ones and every item's
    sequence is renumbered to match the order given.
    """
    spec = self.factory.makeSpecification(
        product=self.factory.makeProduct())
    login_person(spec.owner)
    # Create two work-items in our database.
    wi1_data = self._createWorkItemAndReturnDataDict(spec)
    wi2_data = self._createWorkItemAndReturnDataDict(spec)
    self.assertEqual(2, len(spec.work_items))
    # These are the work items we'll be inserting.
    new_wi1_data = dict(
        title=u'Some Title', status=SpecificationWorkItemStatus.TODO,
        assignee=None, milestone=None)
    new_wi2_data = dict(
        title=u'Other title', status=SpecificationWorkItemStatus.TODO,
        assignee=None, milestone=None)
    # We want to insert the two work items above in the first and third
    # positions respectively, so the existing ones have to be moved around
    # (i.e. have their sequence updated).
    work_items = [new_wi1_data, wi1_data, new_wi2_data, wi2_data]
    spec.updateWorkItems(work_items)
    # Update our data dicts with the sequences we expect the work items in
    # our DB to have.
    new_wi1_data['sequence'] = 0
    wi1_data['sequence'] = 1
    new_wi2_data['sequence'] = 2
    wi2_data['sequence'] = 3
    self.assertEqual(4, len(spec.work_items))
    for data, obj in zip(work_items, list(spec.work_items)):
        self.assertThat(obj, MatchesStructure.byEquality(**data))
def test_values(self):
    """Every denormalised column of BugTaskFlat mirrors the bug task."""
    task = self.factory.makeBugTask()
    # NOTE(review): the extent of this with-block was reconstructed from
    # collapsed whitespace; the three transitions appear to need the
    # product owner's privileges.
    with person_logged_in(task.product.owner):
        task.transitionToAssignee(self.factory.makePerson())
        task.transitionToMilestone(
            self.factory.makeMilestone(product=task.product),
            task.product.owner)
        task.bug.markAsDuplicate(self.factory.makeBug())
    flat = self.getBugTaskFlat(task)
    self.assertThat(
        flat,
        MatchesStructure.byEquality(
            bugtask=task.id,
            bug=task.bug.id,
            datecreated=task.datecreated.replace(tzinfo=None),
            duplicateof=task.bug.duplicateof.id,
            bug_owner=task.bug.owner.id,
            information_type=task.bug.information_type.value,
            date_last_updated=task.bug.date_last_updated.replace(
                tzinfo=None),
            heat=task.bug.heat,
            product=task.product.id,
            productseries=None,
            distribution=None,
            distroseries=None,
            sourcepackagename=None,
            status=task.status.value,
            importance=task.importance.value,
            assignee=task.assignee.id,
            milestone=task.milestone.id,
            owner=task.owner.id,
            active=task.product.active,
            access_policies=None,
            access_grants=None))
    # The full-text index column is also populated.
    self.assertIsNot(None, flat.fti)
def test_if_message_given_message(self):
    # Annotate.if_message returns an annotated version of the matcher if a
    # message is provided.
    matcher = Equals(1)
    expected = Annotate("foo", matcher)
    annotated = Annotate.if_message("foo", matcher)
    self.assertThat(
        annotated,
        MatchesStructure.fromExample(expected, "annotation", "matcher"))
def test_merge_accesspolicygrants_conflicts(self):
    # Conflicting AccessPolicyGrants are deleted.
    policy = self.factory.makeAccessPolicy()
    # The person we keep has an existing grant on the policy.
    person = self.factory.makePerson()
    person_grantor = self.factory.makePerson()
    person_grant = self.factory.makeAccessPolicyGrant(
        grantee=person, grantor=person_grantor, policy=policy)
    person_grant_date = person_grant.date_created
    # The duplicate person has a conflicting grant on the same policy.
    duplicate = self.factory.makePerson()
    duplicate_grantor = self.factory.makePerson()
    self.factory.makeAccessPolicyGrant(
        grantee=duplicate, grantor=duplicate_grantor, policy=policy)
    self._do_premerge(duplicate, person)
    with person_logged_in(person):
        self._do_merge(duplicate, person)
    # Only one grant for the policy exists: the retained person's.
    source = getUtility(IAccessPolicyGrantSource)
    self.assertThat(
        source.findByPolicy([policy]).one(),
        MatchesStructure.byEquality(
            policy=policy,
            grantee=person,
            date_created=person_grant_date))
def test_WithNativeArgs(self):
    # Options can be passed as the string representations of the
    # types the script wants them in.
    options = parse_opts([
        '--submitter=1',
        '--reviewer=2',
        '--id=3',
        '--id=4',
        '--potemplate=5',
        '--language=te',
        '--not-language',
        '--is-current-ubuntu=True',
        '--is-current-upstream=False',
        '--msgid=Hello',
        '--origin=1',
        '--force',
    ])
    self.assertThat(options, MatchesStructure.byEquality(
        submitter=1,
        reviewer=2,
        ids=[3, 4],
        potemplate=5,
        language='te',
        not_language=True,
        is_current_ubuntu=True,
        is_current_upstream=False,
        origin=1,
        force=True))
def _dup_work_items_set_up(self):
    """Create a spec with four work items, two of which are duplicates.

    Returns a (spec, work_items) tuple where work_items is the list of
    data dicts (with sequences) matching the items stored in the DB.
    """
    spec = self.factory.makeSpecification(
        product=self.factory.makeProduct())
    login_person(spec.owner)
    # Create two work-items in our database.
    wi1_data = self._createWorkItemAndReturnDataDict(spec)
    wi2_data = self._createWorkItemAndReturnDataDict(spec)
    # Create a duplicate and a near duplicate, insert into DB.
    new_wi1_data = wi2_data.copy()
    new_wi2_data = new_wi1_data.copy()
    new_wi2_data['status'] = SpecificationWorkItemStatus.DONE
    work_items = [new_wi1_data, wi1_data, new_wi2_data, wi2_data]
    spec.updateWorkItems(work_items)
    # Update our data dicts with the sequences to match data in DB
    new_wi1_data['sequence'] = 0
    wi1_data['sequence'] = 1
    new_wi2_data['sequence'] = 2
    wi2_data['sequence'] = 3
    self.assertEqual(4, len(spec.work_items))
    for data, obj in zip(work_items, spec.work_items):
        self.assertThat(obj, MatchesStructure.byEquality(**data))
    return spec, work_items
def test_toTerm_empty_description(self):
    """An archive without a description gets a placeholder title."""
    archive = self.factory.makeArchive(description='')
    vocab = PPAVocabulary()
    term = vocab.toTerm(archive)
    self.assertThat(term, MatchesStructure.byEquality(
        value=archive,
        token='%s/%s' % (archive.owner.name, archive.name),
        title='No description available'))
def test_initialisation(self):
    """Omshell stores the server address and shared key it is given."""
    server_address = factory.getRandomString()
    shared_key = factory.getRandomString()
    shell = Omshell(server_address, shared_key)
    self.assertThat(
        shell, MatchesStructure.byEquality(
            server_address=server_address, shared_key=shared_key))
def test__Machines_deployment_status(self):
    """``Machines.deployment_status`` is a non-RESTful GET operation."""
    session = bones.SessionAPI(self.description, ("a", "b", "c"))
    action = session.Machines.deployment_status
    self.assertThat(action, MatchesStructure.byEquality(
        name="deployment_status",
        fullname="Machines.deployment_status",
        method="GET",
        handler=session.Machines,
        is_restful=False,
        op="deployment_status",
    ))
def test_productseries_target(self):
    """A product-series task sets only the productseries column."""
    ps = self.factory.makeProductSeries()
    task = self.factory.makeBugTask(target=ps)
    flat = self.getBugTaskFlat(task)
    self.assertThat(
        flat,
        MatchesStructure.byEquality(
            product=None, productseries=ps.id, distribution=None,
            distroseries=None, sourcepackagename=None, active=True))
def test_getConfigs_maps_distro_and_purpose_to_matching_config(self):
    """getConfigs() keys configs by distro and archive purpose."""
    distro = self.makeDistroWithPublishDirectory()
    script = self.makeScript(distro)
    script.setUp()
    reference_config = getPubConfig(distro.main_archive)
    config = script.getConfigs()[distro][ArchivePurpose.PRIMARY]
    self.assertThat(
        config,
        MatchesStructure.fromExample(
            reference_config, 'temproot', 'distroroot', 'archiveroot'))
def test_getTermByToken(self):
    """getTermByToken() resolves a token to a fully populated term."""
    vocab = InformationTypeVocabulary([InformationType.PUBLIC])
    self.assertThat(
        vocab.getTermByToken('PUBLIC'),
        MatchesStructure.byEquality(
            value=InformationType.PUBLIC,
            token='PUBLIC',
            title='Public',
            description=InformationType.PUBLIC.description))
def test_DDEBsGetOverrideFromDEBs(self):
    # Test the basic case ensuring that DDEB files always match the
    # DEB's overrides.
    deb = self.addFile("foo_1.0_i386.deb", "main/devel", "extra")
    ddeb = self.addFile("foo-dbgsym_1.0_i386.ddeb", "universe/web", "low")
    self.assertMatchDDEBErrors([])
    self.upload._overrideDDEBSs()
    # After overriding, the DDEB's fields mirror the DEB's.
    self.assertThat(
        ddeb,
        MatchesStructure.fromExample(
            deb, "component_name", "section_name", "priority_name"))
def test_delete_duplicate_work_item(self): spec, work_items = self._dup_work_items_set_up() # Delete a duplicate work item work_items.pop() spec.updateWorkItems(work_items) self.assertEqual(3, len(spec.work_items)) for data, obj in zip(work_items, list(spec.work_items)): self.assertThat(obj, MatchesStructure.byEquality(**data))
def test_sourcepackage_target(self):
    """A source-package task sets distroseries and sourcepackagename."""
    sp = self.factory.makeSourcePackage()
    task = self.factory.makeBugTask(target=sp)
    flat = self.getBugTaskFlat(task)
    self.assertThat(
        flat,
        MatchesStructure.byEquality(
            product=None, productseries=None, distribution=None,
            distroseries=sp.distroseries.id,
            sourcepackagename=sp.sourcepackagename.id, active=True))
def test_new_creates_nodegroup_with_given_dhcp_settings(self):
    """NodeGroup.objects.new() stores DHCP settings on the interface."""
    name = factory.make_name('nodegroup')
    uuid = factory.make_name('uuid')
    dhcp_network, dhcp_settings = make_dhcp_settings()
    ip = factory.getRandomIPInNetwork(dhcp_network)
    nodegroup = NodeGroup.objects.new(name, uuid, ip, **dhcp_settings)
    # Reload to make sure we see what was actually persisted.
    nodegroup = reload_object(nodegroup)
    interface = get_one(nodegroup.nodegroupinterface_set.all())
    self.assertEqual(name, nodegroup.name)
    self.assertThat(
        interface, MatchesStructure.byEquality(**dhcp_settings))
def test_creates_commissioning_script(self):
    """Saving the form persists the uploaded file as a script."""
    content = factory.getRandomString()
    name = factory.make_name('filename')
    uploaded_file = SimpleUploadedFile(content=content, name=name)
    form = CommissioningScriptForm(files={'content': uploaded_file})
    self.assertTrue(form.is_valid(), form._errors)
    form.save()
    new_script = CommissioningScript.objects.get(name=name)
    self.assertThat(
        new_script,
        MatchesStructure.byEquality(name=name, content=content))
def test_add_duplicate_work_item(self): spec, work_items = self._dup_work_items_set_up() # Test that we can insert another duplicate work item. new_wi3_data = work_items[0].copy() new_wi3_data['sequence'] = 4 work_items.append(new_wi3_data) spec.updateWorkItems(work_items) self.assertEqual(5, len(spec.work_items)) for data, obj in zip(work_items, list(spec.work_items)): self.assertThat(obj, MatchesStructure.byEquality(**data))
def test__whoami(self):
    """Users.whoami() builds a user object from the handler's response."""
    username = make_name_without_spaces("username")
    email = make_name_without_spaces("user@")
    is_admin = pick_bool()
    Users = make_origin().Users
    # Stub the handler call; note "is_superuser" maps to "is_admin".
    Users._handler.whoami.return_value = {
        "username": username, "email": email, "is_superuser": is_admin}
    user = Users.whoami()
    self.assertThat(user, MatchesStructure.byEquality(
        username=username, email=email, is_admin=is_admin))
def test_arg_setup(self):
    """The parser recognises the documented command-line options."""
    parser = self.get_parser()
    filename = factory.getRandomString()
    args = parser.parse_args((
        '--no-overwrite',
        '--filename', filename,
        '--mode', "111"))
    self.assertThat(
        args, MatchesStructure.byEquality(
            no_overwrite=True,
            filename=filename,
            mode="111"))
def test_ensure_master_creates_minimal_interface(self):
    """ensure_master() gives the master nodegroup a loopback-only
    interface with every other field unset."""
    master = NodeGroup.objects.ensure_master()
    interface = get_one(master.nodegroupinterface_set.all())
    self.assertThat(
        interface,
        MatchesStructure.byEquality(
            ip='127.0.0.1',
            subnet_mask=None,
            broadcast_ip=None,
            router_ip=None,
            ip_range_low=None,
            ip_range_high=None,
        ))
def test_inject_result_stores_data(self):
    """inject_result() persists output and exit status for a node."""
    node = factory.make_node()
    name = factory.make_name("result")
    output = factory.getRandomBytes()
    exit_status = next(factory.random_octets)
    inject_result(node, name, output, exit_status)
    self.assertThat(
        NodeCommissionResult.objects.get(node=node, name=name),
        MatchesStructure.byEquality(
            node=node, name=name, script_result=exit_status,
            data=output))
def test_can_edit_cluster(self):
    """POSTing to the cluster-edit view updates the nodegroup."""
    nodegroup = factory.make_node_group()
    edit_link = reverse('cluster-edit', args=[nodegroup.uuid])
    data = {
        'cluster_name': factory.make_name('cluster_name'),
        'name': factory.make_name('name'),
        'status': factory.getRandomEnum(NODEGROUP_STATUS),
    }
    response = self.client.post(edit_link, data)
    self.assertEqual(httplib.FOUND, response.status_code, response.content)
    self.assertThat(
        reload_object(nodegroup), MatchesStructure.byEquality(**data))
def test_creates_interface_from_params(self):
    """Saving the form creates the interfaces given as JSON."""
    name = factory.make_name('name')
    uuid = factory.getRandomUUID()
    interface = make_interface_settings()
    interfaces = json.dumps([interface])
    form = NodeGroupWithInterfacesForm(
        data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
    self.assertTrue(form.is_valid(), form._errors)
    form.save()
    nodegroup = NodeGroup.objects.get(uuid=uuid)
    self.assertThat(
        nodegroup.nodegroupinterface_set.all()[0],
        MatchesStructure.byEquality(**interface))
def test_arg_setup(self):
    """writer.add_arguments() wires up all expected DHCP options."""
    parser = ArgumentParser()
    writer.add_arguments(parser)
    args = parser.parse_args(self.test_args)
    self.assertThat(
        args, MatchesStructure.byEquality(
            subnet='subnet',
            subnet_mask='subnet-mask',
            broadcast_ip='broadcast-ip',
            dns_servers='dns-servers',
            router_ip='router-ip',
            omapi_key='omapi-key',
            ip_range_low='ip-range-low',
            ip_range_high='ip-range-high'))
def test_can_create_cluster_interface(self):
    """POSTing interface fields creates a cluster interface and
    redirects back to the cluster-edit page."""
    nodegroup = factory.make_node_group(
        management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED)
    create_link = reverse(
        'cluster-interface-create', args=[nodegroup.uuid])
    data = factory.get_interface_fields()
    response = self.client.post(create_link, data)
    self.assertEqual(
        (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])),
        (response.status_code, extract_redirect(response)))
    interface = NodeGroupInterface.objects.get(
        nodegroup__uuid=nodegroup.uuid, interface=data['interface'])
    self.assertThat(
        reload_object(interface),
        MatchesStructure.byEquality(**data))
def test_can_create_commissioning_script(self):
    """Uploading a file through the add view creates a script and
    redirects to the settings page."""
    content = factory.getRandomString()
    name = factory.make_name('filename')
    create_link = reverse('commissioning-script-add')
    filepath = self.make_file(name=name, contents=content)
    with open(filepath) as fp:
        response = self.client.post(
            create_link, {'name': name, 'content': fp})
    self.assertEqual(
        (httplib.FOUND, reverse('settings')),
        (response.status_code, extract_redirect(response)))
    new_script = CommissioningScript.objects.get(name=name)
    self.assertThat(
        new_script,
        MatchesStructure.byEquality(name=name, content=content))
def assertWarningsEnabled(self, category):
    """Assert that warnings of `category` are emitted from this package
    with the expected message, category, and originating file."""
    message = "%s from %s" % (category.__name__, self.package_name)
    # Compare against this file's path without its extension (.py/.pyc).
    filename, ext = splitext(__file__)
    with catch_warnings(record=True) as log:
        warn(message, category=category)
    self.assertThat(log, MatchesListwise([
        MatchesStructure(
            message=MatchesAll(
                IsInstance(category),
                MatchesStructure.byEquality(args=(message,)),
            ),
            category=Equals(category),
            filename=StartsWith(filename),
        ),
    ]))
def test_updateWorkItems_marks_removed_ones_as_deleted(self):
    """Work items absent from an update are removed and the survivors
    are renumbered."""
    spec = self.factory.makeSpecification()
    self._createWorkItemAndReturnDataDict(spec)
    wi2_data = self._createWorkItemAndReturnDataDict(spec)
    self.assertEqual(2, len(spec.work_items))
    login_person(spec.owner)
    # We have two work items in the DB but now we want to update them to
    # keep just the second one. The first will be deleted and the sequence
    # of the second will be changed.
    spec.updateWorkItems([wi2_data])
    self.assertEqual(1, len(spec.work_items))
    wi2_data['sequence'] = 0
    self.assertThat(
        spec.work_items[0], MatchesStructure.byEquality(**wi2_data))
def test_create(self):
    """StaticRouteHandler.create() persists a route with the given
    source, destination, gateway and metric."""
    user = factory.make_admin()
    source = factory.make_Subnet()
    # Destination must share the source's IP version.
    destination = factory.make_Subnet(
        version=source.get_ipnetwork().version)
    gateway_ip = factory.pick_ip_in_Subnet(source)
    metric = random.randint(0, 500)
    handler = StaticRouteHandler(user, {}, None)
    staticroute = handler.create({
        "source": source.id,
        "destination": destination.id,
        "gateway_ip": gateway_ip,
        "metric": metric,
    })
    self.assertThat(
        StaticRoute.objects.get(id=staticroute['id']),
        MatchesStructure.byEquality(
            source=source, destination=destination,
            gateway_ip=gateway_ip, metric=metric))
def test_run_inactive(self):
    # A delivery for a webhook that has been deactivated immediately
    # fails.
    with CaptureOops() as oopses:
        job, reqs = self.makeAndRunJob(
            raises=requests.ConnectionError('Connection refused'),
            active=False)
    self.assertThat(
        job,
        MatchesStructure(
            status=Equals(JobStatus.FAILED),
            pending=Is(False),
            successful=Is(False),
            date_sent=Is(None),
            error_message=Equals('Webhook deactivated'),
            json_data=ContainsDict(
                {'result': MatchesDict(
                    {'webhook_deactivated': Is(True)})})))
    # No HTTP requests were made and no OOPSes were recorded.
    self.assertEqual([], reqs)
    self.assertEqual([], oopses.oopses)
def test__default_is_returned_when_value_not_found_in_object(self):
    """ObjectField falls back to its default when _data has no value."""
    class Example(Object):
        alice = ObjectField("alice", default=sentinel.alice_default)
    example = Example({})
    # At first, referencing "alice" yields the default value.
    self.assertThat(example.alice, Is(sentinel.alice_default))
    self.assertThat(example._data, Equals({}))
    # Setting "alice" stores the value in the object's _data dict.
    example.alice = sentinel.alice
    self.assertThat(example, MatchesStructure(alice=Is(sentinel.alice)))
    self.assertThat(example._data, Equals({"alice": sentinel.alice}))
    # Deleting "alice" removes the value from the object's _data dict, and
    # referencing "alice" again yields the default value.
    del example.alice
    self.assertThat(example.alice, Is(sentinel.alice_default))
    self.assertThat(example._data, Equals({}))
def test__read(self):
    """BootSourceSelection.read() fetches by source and id and maps all
    fields, adding boot_source_id from the parent source."""
    source = make_boot_source()
    selection_id = random.randint(0, 100)
    # NOTE(review): local name `os` shadows the stdlib `os` module within
    # this test; harmless here since the module is not used in this scope.
    os = make_name_without_spaces("os")
    release = make_name_without_spaces("release")
    arches = [make_name_without_spaces("arch")]
    subarches = [make_name_without_spaces("subarches")]
    labels = [make_name_without_spaces("labels")]
    BootSourceSelection = make_origin().BootSourceSelection
    BootSourceSelection._handler.read.return_value = {
        "id": selection_id,
        "os": os,
        "release": release,
        "arches": arches,
        "subarches": subarches,
        "labels": labels}
    selection = BootSourceSelection.read(source, selection_id)
    BootSourceSelection._handler.read.assert_called_once_with(
        boot_source_id=source.id, id=selection_id)
    self.assertThat(selection, MatchesStructure.byEquality(
        id=selection_id,
        boot_source_id=source.id,
        os=os,
        release=release,
        arches=arches,
        subarches=subarches,
        labels=labels))
def test_updates_virtual_block_device(self):
    """The form updates the device, rounding size to the block boundary."""
    block_device = factory.make_VirtualBlockDevice()
    name = factory.make_name("lv")
    vguuid = "%s" % uuid.uuid4()
    size = random.randint(
        MIN_BLOCK_DEVICE_SIZE, block_device.filesystem_group.get_size())
    form = UpdateVirtualBlockDeviceForm(instance=block_device, data={
        'name': name,
        'uuid': vguuid,
        'size': size,
    })
    self.assertTrue(form.is_valid(), form.errors)
    block_device = form.save()
    # The saved size is rounded down to the partition alignment boundary.
    expected_size = round_size_to_nearest_block(
        size, PARTITION_ALIGNMENT_SIZE, False)
    self.assertThat(block_device, MatchesStructure.byEquality(
        name=name, uuid=vguuid, size=expected_size,
    ))
def test_make_WebApplicationService(self):
    """make_WebApplicationService() builds the "web" service and the
    region event loop registers it with the expected dependencies."""
    service = eventloop.make_WebApplicationService(
        FakePostgresListenerService(), sentinel.status_worker)
    self.assertThat(service, IsInstance(webapp.WebApplicationService))
    # The endpoint listens on DEFAULT_PORT over IPv6 (the previous
    # comment claimed a hard-coded port 5243 on localhost, which the
    # assertions below contradict: the socket binds "::").
    self.assertThat(
        service.endpoint, MatchesStructure.byEquality(
            reactor=reactor, addressFamily=socket.AF_INET6))
    self.assertThat(service.endpoint.port, Equals(DEFAULT_PORT))
    # IPv6 address is: (host, port, flowinfo, scopeid)
    self.assertThat(
        service.endpoint.socket.getsockname(),
        Equals(("::", DEFAULT_PORT, 0, 0)))
    # It is registered as a factory in RegionEventLoop.
    self.assertIs(
        eventloop.make_WebApplicationService,
        eventloop.loop.factories["web"]["factory"])
    # Has a dependency of postgres-listener and status-worker.
    # (assertEqual replaces the deprecated assertEquals alias.)
    self.assertEqual(
        ["postgres-listener", "status-worker"],
        eventloop.loop.factories["web"]["requires"])
    self.assertFalse(eventloop.loop.factories["web"]["only_on_master"])
def test__getConfiguration_returns_configuration_object(self):
    """The proxy service derives a _ProxyConfiguration that reflects the
    region's proxy settings and the controller's roles."""
    is_region, is_rack = factory.pick_bool(), factory.pick_bool()
    allowed_cidrs = self.make_cidrs()
    proxy_enabled = factory.pick_bool()
    proxy_prefer_v4_proxy = factory.pick_bool()
    proxy_port = random.randint(1000, 8000)
    rpc_service, protocol = yield prepareRegion(
        self, is_region=is_region, is_rack=is_rack,
        proxy_enabled=proxy_enabled, proxy_allowed_cidrs=allowed_cidrs,
        proxy_port=proxy_port,
        proxy_prefer_v4_proxy=proxy_prefer_v4_proxy,
    )
    region_ips = self.extract_regions(rpc_service)
    service, proxy = self.make_RackProxy_ExternalService(
        rpc_service, reactor)
    yield service.startService()
    self.addCleanup((yield service.stopService))
    config = yield service._getConfiguration()
    observed = proxy._getConfiguration(
        config.controller_type,
        config.proxy_configuration,
        config.connections,
    )
    self.assertThat(observed, IsInstance(external._ProxyConfiguration))
    self.assertThat(
        observed, MatchesStructure.byEquality(
            enabled=proxy_enabled,
            port=proxy_port,
            allowed_cidrs=allowed_cidrs,
            prefer_v4_proxy=proxy_prefer_v4_proxy,
            upstream_proxies=region_ips,
            is_region=is_region,
            is_rack=is_rack,
        ),
    )
def test_mark_vouchers_redeemed(self, get_config, now, voucher_value, public_key, num_tokens, data):
    """
    The voucher for unblinded tokens that are added to the store is marked
    as redeemed.
    """
    # NOTE(review): local name `random` shadows the stdlib `random`
    # module within this test; harmless here since the module is not
    # referenced in this scope.
    random = data.draw(
        lists(
            random_tokens(),
            min_size=num_tokens,
            max_size=num_tokens,
            unique=True,
        ),
    )
    unblinded = data.draw(
        lists(
            unblinded_tokens(),
            min_size=num_tokens,
            max_size=num_tokens,
            unique=True,
        ),
    )
    store = self.useFixture(
        TemporaryVoucherStore(get_config, lambda: now)).store
    store.add(voucher_value, len(random), 0, lambda: random)
    store.insert_unblinded_tokens_for_voucher(
        voucher_value, public_key, unblinded, completed=True)
    loaded_voucher = store.get(voucher_value)
    self.assertThat(
        loaded_voucher,
        MatchesStructure(
            expected_tokens=Equals(len(random)),
            state=Equals(
                Redeemed(
                    finished=now,
                    token_count=num_tokens,
                    public_key=public_key,
                )),
        ),
    )
def test_add_idempotent(self, get_config, voucher, now, tokens):
    """
    More than one call to ``VoucherStore.add`` with the same argument
    results in the same state as a single call.
    """
    store = self.useFixture(
        TemporaryVoucherStore(get_config, lambda: now)).store
    first_tokens = store.add(
        voucher,
        expected_tokens=len(tokens),
        counter=0,
        get_tokens=lambda: tokens,
    )
    second_tokens = store.add(
        voucher,
        # The voucher should already exist in the store so the
        # expected_tokens value supplied here is ignored.
        expected_tokens=0,
        counter=0,
        # Likewise, no need to generate tokens here because counter value
        # 0 was already added and tokens were generated then. If
        # get_tokens were called here, it would be an error.
        get_tokens=None,
    )
    self.assertThat(
        store.get(voucher),
        MatchesStructure(
            number=Equals(voucher),
            expected_tokens=Equals(len(tokens)),
            created=Equals(now),
            state=Equals(Pending(counter=0)),
        ),
    )
    # Both calls hand back the originally generated tokens.
    self.assertThat(
        first_tokens,
        Equals(tokens),
    )
    self.assertThat(
        second_tokens,
        Equals(tokens),
    )
def test_creates_handler_with_default_meta(self):
    """A bare Handler gets the documented default _meta configuration."""
    handler = Handler(None, {}, None)
    self.assertThat(
        handler._meta,
        MatchesStructure(
            abstract=Is(False),
            allowed_methods=Equals(
                ["list", "get", "create", "update", "delete", "set_active"]
            ),
            handler_name=Equals(""),
            object_class=Is(None),
            queryset=Is(None),
            pk=Equals("id"),
            fields=Is(None),
            exclude=Is(None),
            list_fields=Is(None),
            list_exclude=Is(None),
            non_changeable=Is(None),
            form=Is(None),
        ),
    )
def test_checkLimit_no_token(self):
    """Without a token, checkLimit queries /rate_limit unauthenticated,
    tracks the remaining quota, and raises once it is exhausted."""
    _add_rate_limit_response("example.org", limit=60, remaining=50)
    with self.rate_limit.checkLimit("http://example.org/", 30):
        pass
    # The rate-limit probe carried no Authorization header.
    self.assertThat(responses.calls[0].request, MatchesStructure(
        path_url=Equals("/rate_limit"),
        headers=Not(Contains("Authorization"))))
    limit = self.rate_limit._limits[("example.org", None)]
    self.assertEqual(49, limit["remaining"])
    self.assertEqual(1000000000, limit["reset"])
    # Exhaust the quota; the next use must fail without any HTTP call.
    limit["remaining"] = 0
    responses.reset()
    with ExpectedException(
            GitHubExceededRateLimit,
            r"Rate limit for example\.org exceeded "
            r"\(resets at Sun Sep 9 07:16:40 2001\)"):
        with self.rate_limit.checkLimit("http://example.org/", 30):
            pass
    self.assertEqual(0, len(responses.calls))
    self.assertEqual(0, limit["remaining"])
class TestWarningsMatcherInterface(TestCase, TestMatchersInterface):
    """
    Tests for `testtools.matchers._warnings.Warnings`.

    Specifically with the optional matcher argument.
    """
    # Matches only when the emitted warning's message mentions 'old_func'.
    matches_matcher = Warnings(warnings_matcher=MatchesListwise([
        MatchesStructure(
            message=AfterPreprocessing(str, Contains('old_func')))
    ]))

    # Callables used as fixtures: one whose warning message matches the
    # matcher above, and one whose message does not.
    def old_func():
        warnings.warn('old_func is deprecated', DeprecationWarning, 2)

    def older_func():
        warnings.warn('older_func is deprecated', DeprecationWarning, 2)

    matches_matches = [old_func]
    matches_mismatches = [lambda: None, older_func]

    str_examples = []
    describe_examples = []
def test_fields(self):
    """DNSForwardZoneConfig stores its constructor arguments verbatim."""
    domain = factory.make_string()
    serial = random.randint(1, 200)
    hostname = factory.make_string()
    network = factory.make_ipv4_network()
    ip = factory.pick_ip_in_network(network)
    default_ttl = random.randint(10, 300)
    mapping = {hostname: [ip]}
    dns_zone_config = DNSForwardZoneConfig(
        domain, serial=serial, default_ttl=default_ttl, mapping=mapping)
    self.assertThat(
        dns_zone_config,
        MatchesStructure.byEquality(
            domain=domain,
            serial=serial,
            _mapping=mapping,
            default_ttl=default_ttl,
        ),
    )
def test_activity_rule_removed(self):
    """Destroying a rule records a RULE_REMOVED activity entry whose
    old_value captures the removed rule's pattern and position."""
    owner = self.factory.makeTeam()
    member = self.factory.makePerson(member_of=[owner])
    repository = self.factory.makeGitRepository(owner=owner)
    rule = self.factory.makeGitRule(
        repository=repository, ref_pattern="refs/heads/*")
    with person_logged_in(member):
        rule.destroySelf(member)
    self.assertThat(
        repository.getActivity().first(),
        MatchesStructure(
            repository=Equals(repository),
            changer=Equals(member),
            changee=Is(None),
            what_changed=Equals(GitActivityType.RULE_REMOVED),
            old_value=MatchesDict({
                "ref_pattern": Equals("refs/heads/*"),
                "position": Equals(0),
            }),
            new_value=Is(None)))
def test_creates_physical_block_device_with_id_path(self):
    """The form accepts an id_path and persists all fields as given."""
    node = factory.make_Node()
    name = factory.make_name("sd")
    id_path = factory.make_absolute_path()
    size = random.randint(
        MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE * 10)
    block_size = 4096
    form = CreatePhysicalBlockDeviceForm(node, data={
        'name': name,
        'id_path': id_path,
        'size': size,
        'block_size': block_size,
    })
    self.assertTrue(form.is_valid(), form.errors)
    block_device = form.save()
    self.assertThat(block_device, MatchesStructure.byEquality(
        name=name, id_path=id_path, size=size, block_size=block_size,
    ))
def test__read(self):
    """Files.read() wraps each record from the handler in a File object
    and returns them as a Files collection."""
    origin = make_origin()
    data = [
        {"filename": make_name_without_spaces()},
        {"filename": make_name_without_spaces()},
    ]
    origin.Files._handler.read.return_value = data
    resources = origin.Files.read()
    # assertEqual replaces the deprecated assertEquals alias (removed
    # from unittest in Python 3.12).
    self.assertEqual(2, len(resources))
    self.assertThat(resources, IsInstance(origin.Files))
    self.assertThat(resources, AllMatch(IsInstance(origin.File)))
    # Each returned File carries one of the filenames, in any order.
    self.assertThat(
        resources,
        MatchesSetwise(*(
            MatchesStructure.byEquality(filename=entry["filename"])
            for entry in data)))
def test_get_config_ntp_server_alias_for_ntp_servers(self):
    """Requesting the legacy "ntp_server" name returns the value stored
    under "ntp_servers"."""
    ntp_servers = factory.make_hostname() + " " + factory.make_hostname()
    Config.objects.set_config("ntp_servers", ntp_servers)
    response = self.client.get(reverse('maas_handler'), {
        "op": "get_config",
        "name": "ntp_server",
    })
    self.assertThat(
        response, MatchesAll(
            # An HTTP 200 response,
            MatchesStructure(status_code=Equals(http.client.OK)),
            # with a JSON body,
            AfterPreprocessing(
                itemgetter("Content-Type"),
                Equals("application/json")),
            # containing the ntp_servers setting.
            AfterPreprocessing(
                lambda response: json.loads(
                    response.content.decode(settings.DEFAULT_CHARSET)),
                Equals(ntp_servers)),
        ))
def test_creates_filesystem_with_mount_point_and_options(self):
    """The form mounts the filesystem, stripping whitespace from the
    mount options."""
    node = factory.make_Node()
    mount_point = factory.make_absolute_path()
    mount_options = factory.make_name("options")
    form = MountNonStorageFilesystemForm(
        node, data={
            "fstype": self.fstype,
            "mount_point": mount_point,
            # Whitespace is stripped by form validation.
            'mount_options': " " + mount_options + "\t\n",
        })
    self.assertTrue(form.is_valid(), form.errors)
    filesystem = form.save()
    self.assertThat(
        filesystem,
        MatchesStructure.byEquality(
            node=node, fstype=self.fstype,
            mount_point=mount_point, mount_options=mount_options,
            is_mounted=True))
def test_when_certs_valid_certs_expired(self, fixture):
    """
    The deferred returned by ``when_certs_valid`` only fires once all
    panicking and expired certs have been renewed.
    """
    with fixture:
        service = fixture.service
        d = service.when_certs_valid()
        # Nothing fires until the service starts renewing.
        self.assertThat(d, has_no_result())
        service.startService()
        self.assertThat(d, succeeded(Is(None)))
        # Every stored certificate now expires beyond the panic window.
        max_expiry = fixture.now + service.panic_interval
        self.assertThat(
            fixture.cert_store.as_dict(),
            succeeded(AfterPreprocessing(
                methodcaller('values'),
                AllMatch(AllMatch(
                    _match_certificate(
                        MatchesStructure(
                            not_valid_after=GreaterThan(max_expiry))))))))
        self.assertThat(fixture.responder.challenges, HasLength(0))
def test_new_sorts_properties(self):
    """DHCPState normalises its inputs: lists sorted by name, hosts
    keyed by MAC, interfaces reduced to their sorted names."""
    (omapi_key, failover_peers, shared_networks, hosts, interfaces,
     global_dhcp_snippets) = self.make_args()
    state = dhcp.DHCPState(
        omapi_key, failover_peers, shared_networks, hosts, interfaces,
        global_dhcp_snippets)
    self.assertThat(state, MatchesStructure.byEquality(
        omapi_key=omapi_key,
        failover_peers=sorted(failover_peers, key=itemgetter("name")),
        shared_networks=sorted(shared_networks, key=itemgetter("name")),
        hosts={
            host["mac"]: host
            for host in hosts
        },
        interfaces=sorted([
            interface["name"]
            for interface in interfaces
        ]),
        global_dhcp_snippets=sorted(
            global_dhcp_snippets, key=itemgetter("name")),
    ))
def test_global_numerical_selfLink(self):
    """
    Dicts with ``name`` and a numerical ``selfLink`` and no ``zone``
    raise an error that will help in debugging.
    """
    name = u"RARENAME"
    selfLink = 123
    self.assertThat(
        lambda: _create_poller({
            u'name': name,
            u'selfLink': selfLink
        }),
        Raises(
            MatchesException(
                MalformedOperation,
                MatchesStructure(message=MatchesAll(
                    Contains(unicode(selfLink)),  # The actual value.
                    Contains('/global/operations/'),  # Expected url.
                    Contains('selfLink'),  # The malformed key.
                    Contains(name),  # The name of the operation.
                )))))
def test_error_index(self):
    """
    The ``index`` of a :obj:`FirstError` is the index of the effect that
    failed in the list.
    """
    expected_exc_info = get_exc_info(EquitableException(message='foo'))
    reraise = partial(six.reraise, *expected_exc_info)
    try:
        sync_perform(
            self.dispatcher,
            parallel([
                Effect(Constant(1)),
                Effect(Func(reraise)),
                Effect(Constant(2))
            ]))
    except FirstError as fe:
        # The failing effect was the second one (index 1).
        self.assertThat(
            fe,
            MatchesStructure(
                index=Equals(1),
                exc_info=MatchesReraisedExcInfo(expected_exc_info)))
    else:
        # Previously the test passed silently if nothing was raised;
        # fail explicitly when FirstError is not raised at all.
        self.fail("sync_perform() did not raise FirstError")
def test_properties(self):
    # Test the model properties.
    parent_series = self.factory.makeDistroSeries()
    derived_series = self.factory.makeDistroSeries()
    dsp = self.factory.makeDistroSeriesParent(
        derived_series=derived_series, parent_series=parent_series,
        initialized=True, inherit_overrides=True)
    self.assertThat(
        dsp,
        MatchesStructure.byEquality(
            derived_series=derived_series,
            parent_series=parent_series,
            initialized=True,
            is_overlay=False,
            inherit_overrides=True,
            component=None,
            pocket=None,
        ))
def test_tasks_are_all_run_before_shutdown_completes(self):
    """stopService() drains the task queue before completing."""
    service = DatabaseTasksService()
    service.startService()
    try:
        queue = service.queue
        event = threading.Event()
        count = random.randint(20, 40)
        # Queue many blocking tasks; they all wait on `event`.
        for _ in range(count):
            service.addTask(event.wait)
        # The queue has `count` tasks (or `count - 1` tasks; the first may
        # have already been pulled off the queue) still pending.
        self.assertThat(
            queue.pending,
            MatchesAny(HasLength(count), HasLength(count - 1)),
        )
    finally:
        # Unblock the tasks and shut the service down.
        event.set()
        service.stopService()
    # The queue is empty and nothing is waiting.
    self.assertThat(
        queue, MatchesStructure.byEquality(waiting=[], pending=[]))
def test_GET_returns_boot_source(self):
    """GET on a boot source selection returns all displayed fields plus
    a resource_uri."""
    self.become_admin()
    boot_source_selection = factory.make_BootSourceSelection()
    response = self.client.get(
        get_boot_source_selection_uri(boot_source_selection))
    self.assertEqual(http.client.OK, response.status_code)
    returned_boot_source_selection = json_load_bytes(response.content)
    boot_source = boot_source_selection.boot_source
    # The returned object contains a 'resource_uri' field.
    self.assertEqual(
        reverse(
            'boot_source_selection_handler',
            args=[boot_source.id, boot_source_selection.id]),
        returned_boot_source_selection['resource_uri'])
    # The other fields are the boot source selection's fields.
    del returned_boot_source_selection['resource_uri']
    # All the fields are present.
    self.assertItemsEqual(
        DISPLAYED_BOOTSOURCESELECTION_FIELDS,
        returned_boot_source_selection.keys())
    self.assertThat(
        boot_source_selection,
        MatchesStructure.byEquality(**returned_boot_source_selection))
def test_passes_returned_on_failure(self, num_passes):
    """
    ``call_with_passes`` returns the passes it uses if the operation fails.
    """
    passes = pass_factory(integer_passes(num_passes))
    self.assertThat(
        call_with_passes(
            lambda group: fail(Exception("Anything")),
            num_passes,
            partial(passes.get, u"message"),
        ),
        failed(Always()),
    )
    # All issued passes were returned; none were spent.
    self.assertThat(
        passes,
        MatchesStructure(
            issued=Equals(set(passes.returned)),
            spent=Equals(set()),
        ),
    )
def test__getConfiguration_returns_configuration_object(self):
    """The rack DNS service derives a _Configuration object reflecting
    the region's DNS settings and this controller's roles."""
    is_region, is_rack = factory.pick_bool(), factory.pick_bool()
    trusted_networks = self.make_trusted_networks()
    rpc_service, protocol = yield prepareRegion(
        self, is_region=is_region, is_rack=is_rack,
        trusted_networks=trusted_networks)
    region_ips = self.extract_regions(rpc_service)
    service = self.make_startable_RackDNSService(rpc_service, reactor)
    yield service.startService()
    self.addCleanup((yield service.stopService))
    observed = yield service._getConfiguration()
    self.assertThat(observed, IsInstance(dns._Configuration))
    self.assertThat(
        observed,
        MatchesStructure.byEquality(
            upstream_dns=region_ips,
            trusted_networks=trusted_networks,
            is_region=is_region,
            is_rack=is_rack))
def test_activity_rule_added(self):
    """Adding a rule records a RULE_ADDED activity entry whose new_value
    captures the new rule's pattern and position."""
    owner = self.factory.makeTeam()
    member = self.factory.makePerson(member_of=[owner])
    repository = self.factory.makeGitRepository(owner=owner)
    self.factory.makeGitRule(repository=repository, creator=member)
    # The second rule lands at position 1.
    self.factory.makeGitRule(
        repository=repository, ref_pattern="refs/heads/stable/*",
        creator=member)
    self.assertThat(
        repository.getActivity().first(),
        MatchesStructure(
            repository=Equals(repository),
            changer=Equals(member),
            changee=Is(None),
            what_changed=Equals(GitActivityType.RULE_ADDED),
            old_value=Is(None),
            new_value=MatchesDict({
                "ref_pattern": Equals("refs/heads/stable/*"),
                "position": Equals(1),
            })))
def test__create_calls_create_with_unsigned_url(self):
    """BootSources.create(url) sends empty keyring fields for an
    unsigned URL and wraps the handler's response."""
    source_id = random.randint(0, 100)
    url = "http://images.maas.io/ephemeral-v3/daily/streams/v1/index.json"
    BootSources = make_origin().BootSources
    BootSources._handler.create.return_value = {
        "id": source_id,
        "url": url,
        "keyring_filename": "",
        "keyring_data": ""
    }
    source = BootSources.create(url)
    BootSources._handler.create.assert_called_once_with(
        url=url, keyring_filename="", keyring_data="")
    self.assertThat(
        source,
        MatchesStructure.byEquality(
            id=source_id, url=url, keyring_filename="",
            keyring_data=""))
def test_get_known_voucher_unpaid(self, get_config, api_auth_token, now, voucher):
    """
    When a voucher is first ``PUT`` and then later a ``GET`` is issued for
    the same voucher then the response code is **OK** and details,
    including those relevant to a voucher which has failed redemption
    because it has not been paid for yet, about the voucher are included
    in a json-encoded response body.
    """
    return self._test_get_known_voucher(
        get_config,
        api_auth_token,
        now,
        voucher,
        MatchesStructure(
            number=Equals(voucher),
            expected_tokens=Equals(NUM_TOKENS),
            created=Equals(now),
            state=Equals(Unpaid(finished=now, )),
        ),
    )