def _dup_work_items_set_up(self):
    """Create a spec with duplicate work items and return both.

    Returns a (spec, work_items) tuple where work_items contains an
    exact duplicate and a near duplicate (same item, different status)
    of the two originally-created items, already stored in the DB and
    verified, with the expected 'sequence' values filled in.
    """
    spec = self.factory.makeSpecification(
        product=self.factory.makeProduct())
    login_person(spec.owner)
    # Two pre-existing work items in the database.
    wi1_data = self._createWorkItemAndReturnDataDict(spec)
    wi2_data = self._createWorkItemAndReturnDataDict(spec)
    # An exact duplicate of the second item, plus a near duplicate
    # that differs only in its status.
    new_wi1_data = wi2_data.copy()
    new_wi2_data = new_wi1_data.copy()
    new_wi2_data['status'] = SpecificationWorkItemStatus.DONE
    work_items = [new_wi1_data, wi1_data, new_wi2_data, wi2_data]
    spec.updateWorkItems(work_items)
    # The sequence of each item is its position in the submitted list;
    # record that so the dicts match what is now stored.
    new_wi1_data['sequence'] = 0
    wi1_data['sequence'] = 1
    new_wi2_data['sequence'] = 2
    wi2_data['sequence'] = 3
    self.assertEqual(4, len(spec.work_items))
    for data, obj in zip(work_items, spec.work_items):
        self.assertThat(obj, MatchesStructure.byEquality(**data))
    return spec, work_items
def test__Version_read(self):
    """Version.read is exposed as a plain restful GET action."""
    session = bones.SessionAPI(self.description)
    action = session.Version.read
    self.assertThat(action, MatchesStructure.byEquality(
        name="read",
        fullname="Version.read",
        method="GET",
        handler=session.Version,
        is_restful=True,
        op=None,
    ))
def test_updateWorkItems_merges_with_existing_ones(self):
    """New work items can be interleaved with existing ones."""
    spec = self.factory.makeSpecification(
        product=self.factory.makeProduct())
    login_person(spec.owner)
    # Two work items already stored in the database.
    wi1_data = self._createWorkItemAndReturnDataDict(spec)
    wi2_data = self._createWorkItemAndReturnDataDict(spec)
    self.assertEqual(2, len(spec.work_items))
    # The two brand-new items to be inserted.
    new_wi1_data = dict(
        title=u'Some Title', status=SpecificationWorkItemStatus.TODO,
        assignee=None, milestone=None)
    new_wi2_data = dict(
        title=u'Other title', status=SpecificationWorkItemStatus.TODO,
        assignee=None, milestone=None)
    # Insert the new items in the first and third positions; the
    # existing items must be renumbered to make room.
    work_items = [new_wi1_data, wi1_data, new_wi2_data, wi2_data]
    spec.updateWorkItems(work_items)
    # Fill in the sequences we expect the stored items to have.
    new_wi1_data['sequence'] = 0
    wi1_data['sequence'] = 1
    new_wi2_data['sequence'] = 2
    wi2_data['sequence'] = 3
    self.assertEqual(4, len(spec.work_items))
    for data, obj in zip(work_items, list(spec.work_items)):
        self.assertThat(obj, MatchesStructure.byEquality(**data))
def test_values(self):
    """Every column of the flattened bugtask mirrors the live objects."""
    task = self.factory.makeBugTask()
    with person_logged_in(task.product.owner):
        task.transitionToAssignee(self.factory.makePerson())
        task.transitionToMilestone(
            self.factory.makeMilestone(product=task.product),
            task.product.owner)
        task.bug.markAsDuplicate(self.factory.makeBug())
    flat = self.getBugTaskFlat(task)
    self.assertThat(
        flat,
        MatchesStructure.byEquality(
            bugtask=task.id,
            bug=task.bug.id,
            # Timestamps are stored naive, so strip the tzinfo first.
            datecreated=task.datecreated.replace(tzinfo=None),
            duplicateof=task.bug.duplicateof.id,
            bug_owner=task.bug.owner.id,
            information_type=task.bug.information_type.value,
            date_last_updated=task.bug.date_last_updated.replace(
                tzinfo=None),
            heat=task.bug.heat,
            product=task.product.id,
            productseries=None,
            distribution=None,
            distroseries=None,
            sourcepackagename=None,
            status=task.status.value,
            importance=task.importance.value,
            assignee=task.assignee.id,
            milestone=task.milestone.id,
            owner=task.owner.id,
            active=task.product.active,
            access_policies=None,
            access_grants=None))
    self.assertIsNot(None, flat.fti)
def test_merge_accesspolicygrants_conflicts(self):
    """Conflicting AccessPolicyGrants are deleted on person merge."""
    policy = self.factory.makeAccessPolicy()
    person = self.factory.makePerson()
    person_grantor = self.factory.makePerson()
    person_grant = self.factory.makeAccessPolicyGrant(
        grantee=person, grantor=person_grantor, policy=policy)
    person_grant_date = person_grant.date_created
    duplicate = self.factory.makePerson()
    duplicate_grantor = self.factory.makePerson()
    # A second grant on the same policy, held by the duplicate person.
    self.factory.makeAccessPolicyGrant(
        grantee=duplicate, grantor=duplicate_grantor, policy=policy)
    self._do_premerge(duplicate, person)
    with person_logged_in(person):
        self._do_merge(duplicate, person)
    # Exactly one grant survives: the one held by the retained person.
    source = getUtility(IAccessPolicyGrantSource)
    self.assertThat(
        source.findByPolicy([policy]).one(),
        MatchesStructure.byEquality(
            policy=policy,
            grantee=person,
            date_created=person_grant_date))
def test_WithNativeArgs(self):
    """String command-line values are converted to native types."""
    options = parse_opts([
        '--submitter=1',
        '--reviewer=2',
        '--id=3',
        '--id=4',
        '--potemplate=5',
        '--language=te',
        '--not-language',
        '--is-current-ubuntu=True',
        '--is-current-upstream=False',
        '--msgid=Hello',
        '--origin=1',
        '--force',
    ])
    self.assertThat(options, MatchesStructure.byEquality(
        submitter=1,
        reviewer=2,
        ids=[3, 4],
        potemplate=5,
        language='te',
        not_language=True,
        is_current_ubuntu=True,
        is_current_upstream=False,
        origin=1,
        force=True))
def test__Machines_deployment_status(self):
    """Machines.deployment_status is a non-restful GET operation."""
    session = bones.SessionAPI(self.description, ("a", "b", "c"))
    action = session.Machines.deployment_status
    self.assertThat(action, MatchesStructure.byEquality(
        name="deployment_status",
        fullname="Machines.deployment_status",
        method="GET",
        handler=session.Machines,
        is_restful=False,
        op="deployment_status",
    ))
def test_initialisation(self):
    """Omshell remembers the server address and shared key it is given."""
    server_address = factory.getRandomString()
    shared_key = factory.getRandomString()
    shell = Omshell(server_address, shared_key)
    self.assertThat(
        shell,
        MatchesStructure.byEquality(
            server_address=server_address,
            shared_key=shared_key))
def test_toTerm_empty_description(self):
    """An archive without a description gets a placeholder title."""
    archive = self.factory.makeArchive(description='')
    vocab = PPAVocabulary()
    term = vocab.toTerm(archive)
    self.assertThat(term, MatchesStructure.byEquality(
        value=archive,
        token='%s/%s' % (archive.owner.name, archive.name),
        title='No description available'))
def test_productseries_target(self):
    """A productseries-targeted task flattens with only productseries set."""
    ps = self.factory.makeProductSeries()
    task = self.factory.makeBugTask(target=ps)
    flat = self.getBugTaskFlat(task)
    self.assertThat(
        flat,
        MatchesStructure.byEquality(
            product=None,
            productseries=ps.id,
            distribution=None,
            distroseries=None,
            sourcepackagename=None,
            active=True))
def test_getTermByToken(self):
    """Looking up 'PUBLIC' by token returns the matching term."""
    vocab = InformationTypeVocabulary([InformationType.PUBLIC])
    self.assertThat(
        vocab.getTermByToken('PUBLIC'),
        MatchesStructure.byEquality(
            value=InformationType.PUBLIC,
            token='PUBLIC',
            title='Public',
            description=InformationType.PUBLIC.description))
def test_delete_duplicate_work_item(self):
    """Removing one of two duplicate work items keeps the other intact."""
    spec, work_items = self._dup_work_items_set_up()
    # Drop the trailing duplicate and resubmit.
    work_items.pop()
    spec.updateWorkItems(work_items)
    self.assertEqual(3, len(spec.work_items))
    for data, obj in zip(work_items, list(spec.work_items)):
        self.assertThat(obj, MatchesStructure.byEquality(**data))
def test_sourcepackage_target(self):
    """A sourcepackage-targeted task flattens to distroseries + spn."""
    sp = self.factory.makeSourcePackage()
    task = self.factory.makeBugTask(target=sp)
    flat = self.getBugTaskFlat(task)
    self.assertThat(
        flat,
        MatchesStructure.byEquality(
            product=None,
            productseries=None,
            distribution=None,
            distroseries=sp.distroseries.id,
            sourcepackagename=sp.sourcepackagename.id,
            active=True))
def test_creates_commissioning_script(self):
    """Saving a valid form stores the uploaded script verbatim."""
    content = factory.getRandomString()
    name = factory.make_name('filename')
    uploaded_file = SimpleUploadedFile(content=content, name=name)
    form = CommissioningScriptForm(files={'content': uploaded_file})
    self.assertTrue(form.is_valid(), form._errors)
    form.save()
    new_script = CommissioningScript.objects.get(name=name)
    self.assertThat(
        new_script,
        MatchesStructure.byEquality(name=name, content=content))
def test_new_creates_nodegroup_with_given_dhcp_settings(self):
    """NodeGroup.objects.new stores the DHCP settings on the interface."""
    name = factory.make_name('nodegroup')
    uuid = factory.make_name('uuid')
    dhcp_network, dhcp_settings = make_dhcp_settings()
    ip = factory.getRandomIPInNetwork(dhcp_network)
    nodegroup = NodeGroup.objects.new(name, uuid, ip, **dhcp_settings)
    nodegroup = reload_object(nodegroup)
    interface = get_one(nodegroup.nodegroupinterface_set.all())
    self.assertEqual(name, nodegroup.name)
    self.assertThat(
        interface, MatchesStructure.byEquality(**dhcp_settings))
def test_add_duplicate_work_item(self):
    """A third duplicate of an existing work item can be appended."""
    spec, work_items = self._dup_work_items_set_up()
    # Copy the first item and append it with the next sequence number.
    new_wi3_data = work_items[0].copy()
    new_wi3_data['sequence'] = 4
    work_items.append(new_wi3_data)
    spec.updateWorkItems(work_items)
    self.assertEqual(5, len(spec.work_items))
    for data, obj in zip(work_items, list(spec.work_items)):
        self.assertThat(obj, MatchesStructure.byEquality(**data))
def test_arg_setup(self):
    """The parser recognises --no-overwrite, --filename and --mode."""
    parser = self.get_parser()
    filename = factory.getRandomString()
    args = parser.parse_args((
        '--no-overwrite',
        '--filename', filename,
        '--mode', "111"))
    self.assertThat(
        args,
        MatchesStructure.byEquality(
            no_overwrite=True,
            filename=filename,
            mode="111"))
def test__whoami(self):
    """Users.whoami maps the handler response onto a user object."""
    username = make_name_without_spaces("username")
    email = make_name_without_spaces("user@")
    is_admin = pick_bool()
    Users = make_origin().Users
    Users._handler.whoami.return_value = {
        "username": username,
        "email": email,
        "is_superuser": is_admin}
    user = Users.whoami()
    # Note: the wire field is "is_superuser" but the attribute is
    # exposed as "is_admin".
    self.assertThat(user, MatchesStructure.byEquality(
        username=username, email=email, is_admin=is_admin))
def test_creates_interface_from_params(self):
    """A JSON 'interfaces' payload creates a matching cluster interface."""
    name = factory.make_name('name')
    uuid = factory.getRandomUUID()
    interface = make_interface_settings()
    interfaces = json.dumps([interface])
    form = NodeGroupWithInterfacesForm(
        data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
    self.assertTrue(form.is_valid(), form._errors)
    form.save()
    nodegroup = NodeGroup.objects.get(uuid=uuid)
    self.assertThat(
        nodegroup.nodegroupinterface_set.all()[0],
        MatchesStructure.byEquality(**interface))
def test_inject_result_stores_data(self):
    """inject_result persists output and exit status for a node."""
    node = factory.make_node()
    name = factory.make_name("result")
    output = factory.getRandomBytes()
    exit_status = next(factory.random_octets)
    inject_result(node, name, output, exit_status)
    self.assertThat(
        NodeCommissionResult.objects.get(node=node, name=name),
        MatchesStructure.byEquality(
            node=node,
            name=name,
            script_result=exit_status,
            data=output))
def test_can_edit_cluster(self):
    """POSTing new values to the edit view updates the nodegroup."""
    nodegroup = factory.make_node_group()
    edit_link = reverse('cluster-edit', args=[nodegroup.uuid])
    data = {
        'cluster_name': factory.make_name('cluster_name'),
        'name': factory.make_name('name'),
        'status': factory.getRandomEnum(NODEGROUP_STATUS),
    }
    response = self.client.post(edit_link, data)
    self.assertEqual(
        httplib.FOUND, response.status_code, response.content)
    self.assertThat(
        reload_object(nodegroup), MatchesStructure.byEquality(**data))
def test_ensure_master_creates_minimal_interface(self):
    """ensure_master creates a loopback-only, otherwise-empty interface."""
    master = NodeGroup.objects.ensure_master()
    interface = get_one(master.nodegroupinterface_set.all())
    self.assertThat(
        interface,
        MatchesStructure.byEquality(
            ip='127.0.0.1',
            subnet_mask=None,
            broadcast_ip=None,
            router_ip=None,
            ip_range_low=None,
            ip_range_high=None,
        ))
def test_arg_setup(self):
    """writer.add_arguments wires up all the expected DHCP options."""
    parser = ArgumentParser()
    writer.add_arguments(parser)
    args = parser.parse_args(self.test_args)
    self.assertThat(
        args,
        MatchesStructure.byEquality(
            subnet='subnet',
            subnet_mask='subnet-mask',
            broadcast_ip='broadcast-ip',
            dns_servers='dns-servers',
            router_ip='router-ip',
            omapi_key='omapi-key',
            ip_range_low='ip-range-low',
            ip_range_high='ip-range-high'))
def test_make_ipvN_range_creates_random_network_if_not_supplied(self):
    """Without a network, make_range delegates to factory.make_ip_range."""
    self.patch_autospec(factory, "make_ip_range")
    factory.make_ip_range.return_value = sentinel.ip_range
    ip_range = self.make_range()
    self.assertThat(ip_range, Is(sentinel.ip_range))
    # The generated network must be an IPNetwork of the right version.
    self.assertThat(
        factory.make_ip_range,
        MockCalledOnceWith(network=Matches(
            MatchesAll(
                IsInstance(IPNetwork),
                MatchesStructure.byEquality(version=self.version),
                first_only=True,
            ),
        ), ),
    )
def test_register_is_safe_with_concurrency(self):
    """Concurrent EventType.objects.register calls all get one type."""
    name = factory.make_name("name")
    desc = factory.make_name("desc")
    level = random.choice(list(LOGGING_LEVELS))

    # Collect the event types returned to each thread; guard the list
    # with a lock since several threads append to it.
    event_types_lock = threading.Lock()
    event_types = []

    # The transactional decorator both retries on IntegrityError and
    # closes old connections in the spawned threads; without the latter
    # Django complains.
    @transactional
    def make_event_type():
        # Register, then pause briefly so the threads' transactions are
        # likely to overlap.
        etype = EventType.objects.register(name, desc, level)
        time.sleep(0.1)
        return etype

    # Record the event type only once make_event_type()'s transaction
    # has been committed; that is when errors would surface.
    def make_event_type_in_thread():
        event_type = make_event_type()
        with event_types_lock:
            event_types.append(event_type)

    # Spawn several threads to provoke a race.
    threads = [
        threading.Thread(target=make_event_type_in_thread)
        for _ in range(5)
    ]

    # Start them all together, then wait for completion.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    # Every thread must have returned the same event type.
    self.expectThat(len(threads), Equals(len(event_types)))
    self.expectThat(
        event_types,
        AllMatch(
            MatchesStructure.byEquality(name=name, description=desc,
                                        level=level)),
    )
def test_run(self):
    """The job requests builds for each architecture and records them."""
    distroseries, processors = self.makeSeriesAndProcessors(
        ["avr2001", "sparc64", "x32"])
    [git_ref] = self.factory.makeGitRefs()
    snap = self.factory.makeSnap(
        git_ref=git_ref, distroseries=distroseries, processors=processors)
    expected_date_created = get_transaction_timestamp(IStore(snap))
    job = SnapRequestBuildsJob.create(
        snap, snap.registrant, distroseries.main_archive,
        PackagePublishingPocket.RELEASE, {"core": "stable"})
    # snapcraft.yaml restricts builds to two of the three processors.
    # NOTE(review): literal reconstructed from a collapsed source line;
    # confirm indentation against the original test.
    snapcraft_yaml = dedent("""\
        architectures:
          - build-on: avr2001
          - build-on: x32
        """)
    self.useFixture(GitHostingFixture(blob=snapcraft_yaml))
    with dbuser(config.ISnapRequestBuildsJobSource.dbuser):
        JobRunner([job]).runAll()
    now = get_transaction_timestamp(IStore(snap))
    self.assertEmailQueueLength(0)
    self.assertThat(job, MatchesStructure(
        job=MatchesStructure.byEquality(status=JobStatus.COMPLETED),
        date_created=Equals(expected_date_created),
        date_finished=MatchesAll(
            GreaterThan(expected_date_created), LessThan(now)),
        error_message=Is(None),
        builds=AfterPreprocessing(set, MatchesSetwise(*[
            MatchesStructure(
                build_request=MatchesStructure.byEquality(id=job.job.id),
                requester=Equals(snap.registrant),
                snap=Equals(snap),
                archive=Equals(distroseries.main_archive),
                distro_arch_series=Equals(distroseries[arch]),
                pocket=Equals(PackagePublishingPocket.RELEASE),
                channels=Equals({"core": "stable"}))
            for arch in ("avr2001", "x32")]))))
def test_no_data(self):
    """A zero-payload message decodes, reprs, and round-trips."""
    raw = codecs.decode(
        '123456790000ffff2702efbeaddefeffffff0005b320', 'hex')
    msg = solaredge.proto.decode_message(raw)
    self.assertThat(
        msg,
        MatchesStructure.byEquality(
            seq=0x227,
            addr_from=0xdeadbeef,
            addr_to=0xfffffffe,
            type=0x500,
            data=b''))
    self.assertEqual(
        '<Message seq=0x0227, addr_from=0xdeadbeef, addr_to=0xfffffffe, '
        'type=0x0500, data=[0 bytes]>', repr(msg))
    # Re-encoding must reproduce the original bytes exactly.
    self.assertEqual(raw, solaredge.proto.encode_message(msg))
def test_can_create_cluster_interface(self):
    """POSTing interface fields creates the interface and redirects."""
    nodegroup = factory.make_node_group(
        management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED)
    create_link = reverse(
        'cluster-interface-create', args=[nodegroup.uuid])
    data = factory.get_interface_fields()
    response = self.client.post(create_link, data)
    self.assertEqual(
        (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])),
        (response.status_code, extract_redirect(response)))
    interface = NodeGroupInterface.objects.get(
        nodegroup__uuid=nodegroup.uuid, interface=data['interface'])
    self.assertThat(
        reload_object(interface), MatchesStructure.byEquality(**data))
def test_update_services_updates_all_services(self):
    """update_services records status and info for every rack service."""
    services = {
        service: self.make_service(service) for service in RACK_SERVICES
    }
    rack_controller = factory.make_RackController()
    update_services(rack_controller.system_id, services.values())
    for service in RACK_SERVICES:
        self.expectThat(
            Service.objects.get(node=rack_controller, name=service),
            MatchesStructure.byEquality(
                status=services[service]["status"],
                status_info=services[service]["status_info"],
            ),
        )
def assertWarningsEnabled(self, category):
    """Assert that warnings of `category` are emitted and recorded."""
    message = "%s from %s" % (category.__name__, self.package_name)
    filename, ext = splitext(__file__)
    with catch_warnings(record=True) as log:
        warn(message, category=category)
    self.assertThat(log, MatchesListwise([
        MatchesStructure(
            message=MatchesAll(
                IsInstance(category),
                MatchesStructure.byEquality(args=(message,)),
            ),
            category=Equals(category),
            # The reported filename should be this module (any suffix).
            filename=StartsWith(filename),
        ),
    ]))
def assertWarningsEnabled(self, category):
    """Assert that self.warn emits a warning of `category`."""
    message = "%s from %s" % (category.__name__, self.package_name)
    with catch_warnings(record=True) as log:
        self.warn(message, category=category)
    self.assertThat(
        log,
        MatchesListwise([
            MatchesStructure(
                message=MatchesAll(
                    IsInstance(category),
                    MatchesStructure.byEquality(args=(message,)),
                ),
                category=Equals(category),
            ),
        ]))
def test_issueMacaroon_via_authserver(self):
    """The authserver issues a snap-build macaroon with one caveat."""
    build = self.factory.makeSnapBuild(snap=self.factory.makeSnap(
        private=True))
    private_root = getUtility(IPrivateApplication)
    authserver = AuthServerAPIView(private_root.authserver, TestRequest())
    macaroon = Macaroon.deserialize(
        authserver.issueMacaroon("snap-build", "SnapBuild", build.id))
    self.assertThat(
        macaroon,
        MatchesStructure(
            location=Equals("launchpad.dev"),
            identifier=Equals("snap-build"),
            caveats=MatchesListwise([
                MatchesStructure.byEquality(
                    caveat_id="lp.snap-build %s" % build.id),
            ])))
def test_getByBothSeries(self):
    """getByBothSeries only matches usable/inferable combinations."""
    dses = [self.factory.makeDistroSeries() for _ in range(2)]
    snappy_serieses = [self.factory.makeSnappySeries() for _ in range(2)]
    snappy_serieses[0].usable_distro_series = [dses[0]]
    sds_set = getUtility(ISnappyDistroSeriesSet)
    # The one usable pairing resolves with a combined title.
    self.assertThat(
        sds_set.getByBothSeries(snappy_serieses[0], dses[0]),
        MatchesStructure.byEquality(
            snappy_series=snappy_serieses[0],
            distro_series=dses[0],
            title="%s, for %s" % (
                dses[0].fullseriesname, snappy_serieses[0].title)))
    # Every other combination is unmatched.
    self.assertIsNone(sds_set.getByBothSeries(snappy_serieses[0], dses[1]))
    self.assertIsNone(sds_set.getByBothSeries(snappy_serieses[1], dses[0]))
    self.assertIsNone(sds_set.getByBothSeries(snappy_serieses[1], dses[1]))
    self.assertIsNone(sds_set.getByBothSeries(snappy_serieses[0], None))
    self.assertIsNone(sds_set.getByBothSeries(snappy_serieses[1], None))
    # Once inference is allowed, a None distro series matches too.
    snappy_serieses[0].can_infer_distro_series = True
    self.assertThat(
        sds_set.getByBothSeries(snappy_serieses[0], None),
        MatchesStructure.byEquality(
            snappy_series=snappy_serieses[0],
            distro_series=None,
            title=snappy_serieses[0].title))
    self.assertIsNone(sds_set.getByBothSeries(snappy_serieses[1], None))
def test_updateWorkItems_marks_removed_ones_as_deleted(self):
    """Items missing from an update are removed; survivors renumbered."""
    spec = self.factory.makeSpecification()
    self._createWorkItemAndReturnDataDict(spec)
    wi2_data = self._createWorkItemAndReturnDataDict(spec)
    self.assertEqual(2, len(spec.work_items))
    login_person(spec.owner)
    # Keep only the second of the two stored items; the first is
    # deleted and the survivor's sequence is reset.
    spec.updateWorkItems([wi2_data])
    self.assertEqual(1, len(spec.work_items))
    wi2_data['sequence'] = 0
    self.assertThat(
        spec.work_items[0], MatchesStructure.byEquality(**wi2_data))
def test_create_or_update_for_bcache_creates_block_device(self):
    """Creating a bcache group also creates its virtual block device."""
    filesystem_group = factory.make_FilesystemGroup(
        group_type=FILESYSTEM_GROUP_TYPE.BCACHE
    )
    self.assertThat(
        filesystem_group.virtual_device,
        MatchesStructure.byEquality(
            name=filesystem_group.name,
            size=filesystem_group.get_size(),
            block_size=(
                filesystem_group.get_virtual_block_device_block_size()
            ),
        ),
    )
def test__options_adds_options(self):
    """SelectBucket.options registers its two command-line options."""
    select = SelectBucket()
    parser = OptionParser()
    select.options(parser=parser, env={})
    self.assertThat(
        parser.option_list[-2:],
        MatchesListwise([
            # The --with-select-bucket enable flag.
            MatchesStructure.byEquality(
                action="store_true",
                default=None,
                dest="enable_plugin_select_bucket",
            ),
            # The --select-bucket callback option.
            MatchesStructure.byEquality(
                action="callback",
                default=None,
                dest="select-bucket_selected_bucket",
                metavar="BUCKET/BUCKETS",
                type="string",
                _short_opts=[],
                _long_opts=["--select-bucket"],
            )
        ]))
def test__options_adds_options(self):
    """Select.options registers its two command-line options."""
    select = Select()
    parser = OptionParser()
    select.options(parser=parser, env={})
    self.assertThat(
        parser.option_list[-2:],
        MatchesListwise([
            # The --with-select enable flag.
            MatchesStructure.byEquality(
                action="store_true",
                default=None,
                dest="enable_plugin_select",
            ),
            # The --select-dir/--select-directory repeatable option.
            MatchesStructure.byEquality(
                action="append",
                default=[],
                dest="select_dirs",
                metavar="DIR",
                type="string",
                _short_opts=[],
                _long_opts=["--select-dir", "--select-directory"],
            )
        ]))
def assertMacaroonVerifies(self, issuer, macaroon, context, **kwargs):
    """Assert `issuer` verifies `macaroon`; attach errors on failure."""
    errors = []
    try:
        verified = issuer.verifyMacaroon(
            macaroon, context, errors=errors, **kwargs)
        self.assertIsNotNone(verified)
        self.assertThat(
            verified,
            MatchesStructure.byEquality(issuer_name=issuer.identifier))
    except Exception:
        # Surface any collected verification errors as a test detail
        # before letting the failure propagate.
        if errors:
            self.addDetail("errors", text_content("\n".join(errors)))
        raise
def test__options_adds_options(self):
    """Subunit.options registers its two command-line options."""
    select = Subunit()
    parser = OptionParser()
    select.options(parser=parser, env={})
    self.assertThat(
        parser.option_list[-2:],
        MatchesListwise([
            # The --with-subunit enable flag.
            MatchesStructure.byEquality(
                action="store_true",
                default=None,
                dest="enable_plugin_subunit",
            ),
            # The --subunit-fd option (defaults to stdout's fd).
            MatchesStructure.byEquality(
                action="store",
                default=1,
                dest="subunit_fd",
                metavar="FD",
                type="int",
                _short_opts=[],
                _long_opts=["--subunit-fd"],
            )
        ]))
def test__init(self):
    """The service stores its limit and starts with a closed queue."""
    limit = random.randint(1, 1000)
    service = DatabaseTasksService(limit)
    self.assertThat(
        service,
        MatchesStructure(
            # The requested limit is retained.
            limit=Equals(limit),
            # Until started, the queue accepts nothing (size=0).
            queue=MatchesAll(
                IsInstance(DeferredQueue),
                MatchesStructure.byEquality(size=0, backlog=1),
                first_only=True,
            ),
        ))
def test_update_sets_regiond_running_with_all_workers(self):
    """regiond stays RUNNING after processes are deleted and recreated."""
    yield deferToDatabase(load_builtin_scripts)
    master = self.make_IPCMasterService()
    yield master.startService()

    # Register a full complement of workers with distinct PIDs.
    pids = set()
    while len(pids) < workers.MAX_WORKERS_COUNT:
        pids.add(random.randint(1, 512))
    for pid in pids:
        yield master.registerWorker(pid, MagicMock())

    # With all workers registered the region service reports running.
    regiond_service = yield deferToDatabase(self.getRegiondService)
    self.assertThat(
        regiond_service,
        MatchesStructure.byEquality(status=SERVICE_STATUS.RUNNING,
                                    status_info=""),
    )

    # Delete every process; master.update() must recreate them and the
    # service status must remain running.
    def delete_all():
        region = RegionController.objects.get_running_controller()
        region.processes.all().delete()

    yield deferToDatabase(delete_all)
    yield master.update()

    regiond_service = yield deferToDatabase(self.getRegiondService)
    self.assertThat(
        regiond_service,
        MatchesStructure.byEquality(status=SERVICE_STATUS.RUNNING,
                                    status_info=""),
    )
    yield master.stopService()
def test_properties(self):
    """A new GitRule records its repository, pattern, creator and dates."""
    owner = self.factory.makeTeam()
    member = self.factory.makePerson(member_of=[owner])
    repository = self.factory.makeGitRepository(owner=owner)
    rule = self.factory.makeGitRule(
        repository=repository, ref_pattern="refs/heads/stable/*",
        creator=member)
    now = get_transaction_timestamp(Store.of(rule))
    self.assertThat(
        rule,
        MatchesStructure.byEquality(
            repository=repository,
            ref_pattern="refs/heads/stable/*",
            creator=member,
            date_created=now,
            date_last_modified=now))
def test_can_create_commissioning_script(self):
    """Uploading a script via the add view stores it and redirects."""
    content = factory.getRandomString()
    name = factory.make_name('filename')
    create_link = reverse('commissioning-script-add')
    filepath = self.make_file(name=name, contents=content)
    with open(filepath) as fp:
        response = self.client.post(
            create_link, {'name': name, 'content': fp})
    self.assertEqual(
        (httplib.FOUND, reverse('settings')),
        (response.status_code, extract_redirect(response)))
    new_script = CommissioningScript.objects.get(name=name)
    self.assertThat(
        new_script,
        MatchesStructure.byEquality(name=name, content=content))
def test_properties(self):
    """A GitActivity row captures who changed what, and when."""
    repository = self.factory.makeGitRepository()
    changee = self.factory.makePerson()
    activity = GitActivity(
        repository, repository.owner, GitActivityType.RULE_ADDED,
        changee=changee, old_value={"old": None},
        new_value={"new": None})
    now = get_transaction_timestamp(Store.of(activity))
    self.assertThat(activity, MatchesStructure.byEquality(
        repository=repository,
        date_changed=now,
        changer=repository.owner,
        changee=changee,
        what_changed=GitActivityType.RULE_ADDED,
        old_value={"old": None},
        new_value={"new": None}))
def test_update_service_for_updates_service_status_and_info(self):
    """update_service_for persists the new status and status_info."""
    controller = factory.make_RegionController()
    Service.objects.create_services_for(controller)
    service = random.choice(list(REGION_SERVICES))
    status = factory.pick_choice(SERVICE_STATUS_CHOICES)
    info = factory.make_name("info")
    observed = Service.objects.update_service_for(
        controller, service, status, info)
    self.assertThat(
        reload_object(observed),
        MatchesStructure.byEquality(
            node=controller,
            name=service,
            status=status,
            status_info=info),
    )
def test__fabric_read(self):
    """Fabric.read fetches by id and materialises nested VLANs."""
    Fabric = make_origin().Fabric
    fabric = {
        "id": random.randint(0, 100),
        "name": make_string_without_spaces(),
        "class_type": make_string_without_spaces(),
        "vlans": [{"id": 1}, {"id": 2}],
    }
    Fabric._handler.read.return_value = fabric
    self.assertThat(Fabric.read(id=fabric["id"]), Equals(Fabric(fabric)))
    Fabric._handler.read.assert_called_once_with(id=fabric["id"])
    # The embedded vlan dicts become Vlan objects with matching ids.
    self.assertThat(
        Fabric(fabric).vlans,
        MatchesSetwise(
            MatchesAll(IsInstance(Vlan), MatchesStructure.byEquality(id=1)),
            MatchesAll(IsInstance(Vlan), MatchesStructure.byEquality(id=2)),
        ),
    )
def test__creates_subnet_in_default_fabric_with_vid(self):
    """Giving a vid (no vlan) selects the default fabric's VLAN."""
    vlan = factory.make_VLAN(fabric=Fabric.objects.get_default_fabric())
    network = factory.make_ip4_or_6_network()
    cidr = str(network.cidr)
    form = SubnetForm({
        "cidr": cidr,
        "vid": vlan.vid,
        "vlan": None,
    })
    self.assertTrue(form.is_valid(), dict(form.errors))
    subnet = form.save()
    self.assertThat(
        subnet,
        MatchesStructure.byEquality(name=cidr, cidr=cidr, vlan=vlan))
def test_registerWorker_sets_regiond_degraded_with_less_than_workers(self):
    """Registering one of N expected workers marks regiond DEGRADED."""
    master = self.make_IPCMasterService()
    yield master.startService()
    pid = random.randint(1, 512)
    yield master.registerWorker(pid, MagicMock())
    regiond_service = yield deferToDatabase(self.getRegiondService)
    self.assertThat(regiond_service, MatchesStructure.byEquality(
        status=SERVICE_STATUS.DEGRADED,
        status_info="1 process running but %s were expected." % (
            workers.MAX_WORKERS_COUNT)))
    yield master.stopService()
def test_register_event_type_creates_object(self):
    """The RegisterEventType RPC stores a matching EventType row."""
    name = factory.make_name('name')
    description = factory.make_name('description')
    level = random.randint(0, 100)
    response = yield call_responder(
        Region(), RegisterEventType,
        {'name': name, 'description': description, 'level': level})
    # The responder acknowledges with an empty dict.
    self.assertEqual({}, response)
    event_type = yield deferToDatabase(self.get_event_type, name)
    self.assertThat(
        event_type,
        MatchesStructure.byEquality(
            name=name, description=description, level=level)
    )
def test_updateWorkItems_no_existing_items(self):
    """When there are no existing work items, updateWorkItems will
    create a new entry for every element in the list given to it.
    """
    spec = self.factory.makeSpecification(
        product=self.factory.makeProduct())
    milestone = self.factory.makeMilestone(product=spec.product)
    work_item1_data = dict(
        title=u'Foo Bar', status=SpecificationWorkItemStatus.DONE,
        assignee=spec.owner, milestone=None)
    work_item2_data = dict(
        title=u'Bar Foo', status=SpecificationWorkItemStatus.TODO,
        assignee=None, milestone=milestone)
    # We start with no work items.
    # (Use assertEqual, not the deprecated assertEquals alias, to match
    # the rest of this file's assertions.)
    self.assertEqual([], list(spec.work_items))
    login_person(spec.owner)
    spec.updateWorkItems([work_item1_data, work_item2_data])
    # And after calling updateWorkItems() we have 2 work items.
    self.assertEqual(2, len(spec.work_items))
    # The data dicts we pass to updateWorkItems() have no sequence because
    # that's taken from their position on the list, so we update our data
    # dicts with the sequence we expect our work items to have.
    work_item1_data['sequence'] = 0
    work_item2_data['sequence'] = 1
    # Assert that the work items ultimately inserted in the DB are exactly
    # what we expect them to be.
    created_wi1, created_wi2 = list(spec.work_items)
    self.assertThat(
        created_wi1, MatchesStructure.byEquality(**work_item1_data))
    self.assertThat(
        created_wi2, MatchesStructure.byEquality(**work_item2_data))
def test_upload_needs_discharge_macaroon_refresh(self):
    """A 401 needs_refresh reply triggers a refresh and a retry."""
    store_secrets = self._make_store_secrets()
    snapbuild = self.makeUploadableSnapBuild(store_secrets=store_secrets)
    transaction.commit()
    self._addUnscannedUploadResponse()
    # First push attempt fails, asking for a macaroon refresh.
    responses.add(
        "POST", "http://sca.example/dev/api/snap-push/", status=401,
        headers={"WWW-Authenticate": "Macaroon needs_refresh=1"})
    self._addMacaroonRefreshResponse()
    self._addSnapPushResponse()
    with dbuser(config.ISnapStoreUploadJobSource.dbuser):
        self.assertEqual(
            "http://sca.example/dev/api/snaps/1/builds/1/status",
            self.client.upload(snapbuild))
    # Expect: upload, failed push, token refresh, successful push.
    requests = [call.request for call in responses.calls]
    self.assertThat(requests, MatchesListwise([
        MatchesStructure.byEquality(path_url="/unscanned-upload/"),
        MatchesStructure.byEquality(path_url="/dev/api/snap-push/"),
        MatchesStructure.byEquality(path_url="/api/v2/tokens/refresh"),
        MatchesStructure.byEquality(path_url="/dev/api/snap-push/"),
    ]))
    # The stored discharge macaroon must have been replaced.
    self.assertNotEqual(
        store_secrets["discharge"],
        snapbuild.snap.store_secrets["discharge"])
def test_create(self):
    """The handler creates a reserved IPRange from the given bounds."""
    user = factory.make_User()
    factory.make_Subnet(cidr="192.168.0.0/24")
    handler = IPRangeHandler(user, {}, None)
    ip_range = handler.create({
        "type": "reserved",
        "start_ip": "192.168.0.10",
        "end_ip": "192.168.0.20",
    })
    self.assertThat(
        IPRange.objects.get(id=ip_range["id"]),
        MatchesStructure.byEquality(
            type="reserved",
            start_ip="192.168.0.10",
            end_ip="192.168.0.20"),
    )
def test_create_multiple_jobs_structure(self):
    """create_multiple_jobs builds a DSD job with the right metadata."""
    dsp = self.factory.makeDistroSeriesParent()
    spph = self.createSPPHs(dsp.derived_series, 1)[0]
    job_ids = create_multiple_jobs(
        dsp.derived_series, dsp.parent_series)
    job = bulk.load(DistributionJob, job_ids)[0]
    sourcepackagenameid = spph.sourcepackagerelease.sourcepackagename.id
    expected_metadata = {
        u'sourcepackagename': sourcepackagenameid,
        u'parent_series': dsp.parent_series.id}
    self.assertThat(job, MatchesStructure.byEquality(
        distribution=dsp.derived_series.distribution,
        distroseries=dsp.derived_series,
        job_type=DistributionJobType.DISTROSERIESDIFFERENCE,
        metadata=expected_metadata))
def test_branch_sharing_policy_non_commercial(self):
    """Setting a commercial-only policy on a free project is Forbidden."""
    product = self.factory.makeProduct()
    owner = product.owner
    webservice = webservice_for_person(
        product.owner, permission=OAuthPermission.WRITE_PRIVATE)
    response = self.patch(
        webservice, product, branch_sharing_policy='Proprietary')
    self.assertThat(response, MatchesStructure.byEquality(
        status=403,
        body=('A current commercial subscription is required to use '
              'proprietary branches.')))
    # The policy itself must be left unchanged.
    with person_logged_in(owner):
        self.assertEqual(
            BranchSharingPolicy.PUBLIC, product.branch_sharing_policy)
def test__creates_subnet_in_default_vlan_in_fabric(self):
    """Giving only a fabric places the subnet on its default VLAN."""
    fabric = factory.make_Fabric()
    network = factory.make_ip4_or_6_network()
    cidr = str(network.cidr)
    form = SubnetForm({
        "cidr": cidr,
        "fabric": fabric.id,
        "vlan": None,
    })
    self.assertTrue(form.is_valid(), dict(form.errors))
    subnet = form.save()
    self.assertThat(
        subnet,
        MatchesStructure.byEquality(
            name=cidr, cidr=cidr, vlan=fabric.get_default_vlan()))
def test__doesnt_overwrite_other_fields(self):
    """Updating only the name leaves every other subnet field alone."""
    new_name = factory.make_name("subnet")
    subnet = factory.make_Subnet()
    form = SubnetForm(instance=subnet, data={
        "name": new_name,
    })
    self.assertTrue(form.is_valid(), dict(form.errors))
    form.save()
    subnet = reload_object(subnet)
    self.assertThat(
        subnet,
        MatchesStructure.byEquality(
            name=new_name,
            vlan=subnet.vlan,
            cidr=subnet.cidr,
            gateway_ip=subnet.gateway_ip,
            dns_servers=subnet.dns_servers))
def test_process_not_OK_response(self):
    """A non-200 response raises HTTPError with the response's details."""
    response = factory.make_response(
        http.client.NOT_FOUND, b"", "application/json")
    response.url = factory.make_string()
    error = self.assertRaises(
        urllib.error.HTTPError, tags.process_response, response)
    self.assertThat(
        error,
        MatchesStructure.byEquality(
            url=response.url,
            code=response.code,
            msg="Not Found, expected 200 OK",
            headers=response.headers,
            fp=response.fp,
        ),
    )
def test_marshall_from_json_data(self):
    """The marshaller turns a JSON dict into the schema'd object."""
    self.useFixture(ZopeAdapterFixture(inline_example_from_dict))
    field = InlineObject(schema=IInlineExample)
    request = WebServiceTestRequest()
    request.setVirtualHostRoot(names=["devel"])
    marshaller = InlineObjectFieldMarshaller(field, request)
    person = self.factory.makePerson()
    data = {
        "person_link": canonical_url(person, request=request),
        "status": "Running",
    }
    obj = marshaller.marshall_from_json_data(data)
    self.assertThat(
        obj,
        MatchesStructure.byEquality(
            person=person, status=JobStatus.RUNNING))