class ExtraNetworkClientTests(TestCase):
    """
    Direct tests for ``_NetworkClient`` that go beyond the guarantees of
    ``IKubernetesClient``.
    """
    @capture_logging(
        # After the test body runs, the captured log must contain at least
        # one message describing a list of the v1 Pod kind.
        lambda self, logger: self.expectThat(
            logger.messages,
            AnyMatch(
                ContainsDict({
                    u"action_type": Equals(u"network-client:list"),
                    u"apiVersion": Equals(u"v1"),
                    u"kind": Equals(u"Pod"),
                })),
        ),
    )
    def test_list_logging(self, logger):
        """
        ``_NetworkClient.list`` logs an Eliot event describing its given type.
        """
        # A memory-backed reactor is enough: we only need the request to be
        # issued, not answered, for the log event to be emitted.
        kubernetes = network_kubernetes(
            base_url=URL.fromText(u"http://127.0.0.1/"),
            agent=Agent(MemoryReactorClock()),
        )
        kubernetes.client().list(v1.Pod)
def check_namespaces(namespaces):
    """
    Verify that the namespace created by the test appears in the
    retrieved ``NamespaceList``.
    """
    self.assertThat(namespaces, IsInstance(v1.NamespaceList))
    # Built-in namespaces may also be present in the listing.  Finding
    # the one we created among them is sufficient.
    created = MatchesAll(matches_namespace(obj), has_uid(), is_active())
    self.assertThat(namespaces.items, AnyMatch(created))
def test_log_error(self, logger):
    """
    ``log_error`` encodes module, function, and line number in the
    message_type, and passes other keyword arguments onto the message
    structure.
    """
    # NOTE: ``log_error`` must stay on the line immediately after the
    # ``getframeinfo`` call -- ``line_no`` below is derived as
    # ``frame.lineno + 1``.  Do not insert anything between these two
    # statements.
    frame = getframeinfo(currentframe())
    log_error(key='VAL')
    line_no = frame.lineno + 1
    self.assertThat(
        logger.messages,
        AnyMatch(
            _dict_values_match(
                # The message_type should embed the module name, the
                # calling function's name, and the call-site line number.
                message_type=ContainsAll(
                    [__name__, u'test_log_error', unicode(line_no)]),
                # Extra keyword arguments pass straight through onto the
                # message structure.
                key=Equals('VAL')
            )
        )
    )
def check(collection):
    """
    Assert the retrieved collection has the expected list type and
    contains an item matching ``expected``.
    """
    self.assertThat(collection, IsInstance(list_cls))
    item_matcher = AnyMatch(matches(expected))
    self.assertThat(collection.items, item_matcher)
def test_duplicated_calls(self):
    """
    Verify that if every call to the :class:`GCEOperations` is
    duplicated that we handle the errors correctly.

    This should force some specific scheduling situations that resemble
    race conditions with another agent trying to converge to the same
    state, or a condition where the dataset agent has rebooted after a
    crash that happened in the middle of an :class:`IBlockDeviceAPI`
    call.

    In these situations we should verify that the second call to many
    of the underlying atomic methods would result in the correct
    underlying :class:`VolumeException`.
    """
    actual_api = gceblockdeviceapi_for_test(self)
    operations = actual_api._operations
    # Replace the real operations with a proxy that performs every
    # IGCEOperations call twice, simulating a racing second agent (or a
    # crashed-and-restarted agent repeating an in-flight call).
    api = actual_api.set(
        '_operations',
        repeat_call_proxy_for(IGCEOperations, operations)
    )
    dataset_id = uuid4()
    # There is no :class:`VolumeException` for creating an already created
    # volume. Thus, GCE just raises its own custom exception in that case.
    self.assertThat(
        lambda: api.create_volume(
            dataset_id=dataset_id,
            size=get_minimum_allocatable_size()
        ),
        Raises(MatchesException(GCEVolumeException))
    )
    # Despite the duplicated call raising above, the first create
    # succeeded, so the volume must appear in the listing.
    volumes = api.list_volumes()
    self.assertThat(
        volumes,
        AnyMatch(MatchesStructure(dataset_id=Equals(dataset_id)))
    )
    volume = next(v for v in volumes if v.dataset_id == dataset_id)
    compute_instance_id = api.compute_instance_id()
    # The second attach of the duplicated pair finds the volume already
    # attached by the first.
    self.assertThat(
        lambda: api.attach_volume(
            blockdevice_id=volume.blockdevice_id,
            attach_to=compute_instance_id,
        ),
        Raises(MatchesException(AlreadyAttachedVolume))
    )
    self.assertThat(
        api.get_device_path(volume.blockdevice_id).path,
        Contains('/dev/sd')
    )
    # Detach volume does not error out because we have cleanup code in our
    # acceptance tests that assumes that calls to detach_volume while the
    # volume is already being detached do not error out, and instead block
    # until the volume is detached.
    #
    # With the repeat call proxy, this manifests as neither call reporting
    # the unattached volume, but both calls merely block until the
    # blockdevice is detached.
    api.detach_volume(
        blockdevice_id=volume.blockdevice_id,
    )
    # The second destroy of the duplicated pair must see the volume as
    # already gone.
    self.assertThat(
        lambda: api.destroy_volume(
            blockdevice_id=volume.blockdevice_id,
        ),
        Raises(MatchesException(UnknownVolume))
    )
    # After destruction the volume must no longer appear in the listing.
    self.assertThat(
        api.list_volumes(),
        AllMatch(Not(MatchesStructure(dataset_id=Equals(dataset_id))))
    )