Example #1
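 # Note: in these test excerpts the mock_* arguments (mock_client, mock_get_namespace,
 # etc.) are injected by @patch decorators that the listing omits; the patch targets
 # are not shown here.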
 def test_delete_pod_name_raises(self, mock_get_namespace, mock_client):
     mock_client.rest.ApiException = Exception
     mock_client.CoreV1Api.return_value.delete_namespaced_pod.side_effect = ApiException
     kc = KubernetesClient()
     with self.assertRaisesRegex(CalrissianJobException,
                                 'Error deleting pod named pod-123'):
         kc.delete_pod_name('pod-123')
Example #2
 def test_should_delete_pod_reads_env(self, mock_os, mock_get_namespace,
                                      mock_client):
     mock_os.getenv.return_value = 'NO'
     kc = KubernetesClient()
     self.assertFalse(kc.should_delete_pod())
     self.assertEqual(mock_os.getenv.call_args,
                      call('CALRISSIAN_DELETE_PODS', ''))
Example #3
 def test_is_terminated(self):
     self.assertFalse(
         KubernetesClient.state_is_terminated(self.running_state))
     self.assertFalse(
         KubernetesClient.state_is_terminated(self.waiting_state))
     self.assertTrue(
         KubernetesClient.state_is_terminated(self.terminated_state))
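For context, a minimal sketch of the predicate this test exercises, assuming the usual kubernetes V1ContainerState convention that exactly one of running/waiting/terminated is populated; the project's actual implementation may differ:

 @staticmethod
 def state_is_terminated(state):
     # Assumed sketch: a state counts as terminated when its `terminated`
     # field is set; running and waiting states leave it as None.
     return state.terminated is not None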
Example #4
File: job.py Project: x-native/calrissian
 def __init__(self, *args, **kwargs):
     super(CalrissianCommandLineJob, self).__init__(*args, **kwargs)
     self.client = KubernetesClient()
     volume_builder = KubernetesVolumeBuilder()
     volume_builder.add_persistent_volume_entries_from_pod(
         self.client.get_current_pod())
     self.volume_builder = volume_builder
Example #5
 def test_get_pod_for_name_not_found(self, mock_get_namespace, mock_client):
     mock_client.CoreV1Api.return_value.list_namespaced_pod.return_value = Mock(
         items=[])
     kc = KubernetesClient()
     with self.assertRaisesRegex(CalrissianJobException,
                                 'Unable to find pod with name somepod'):
         kc.get_pod_for_name('somepod')
Example #6
 def test_get_current_pod_missing_env_var(self, mock_os, mock_get_namespace,
                                          mock_client):
     mock_os.environ = {}
     kc = KubernetesClient()
     with self.assertRaisesRegex(
             CalrissianJobException,
             r'Missing required environment variable \$CALRISSIAN_POD_NAME'):
         kc.get_current_pod()
Example #7
 def test_get_pod_for_name_multiple_found(self, mock_get_namespace,
                                          mock_client):
     mock_client.CoreV1Api.return_value.list_namespaced_pod.return_value = Mock(
         items=['pod1', 'pod2'])
     kc = KubernetesClient()
     with self.assertRaisesRegex(CalrissianJobException,
                                 'Multiple pods found with name somepod'):
         kc.get_pod_for_name('somepod')
Example #8
 def test_get_current_pod_missing_env_var(self, mock_os, mock_get_namespace,
                                          mock_client):
     mock_os.environ = {}
     kc = KubernetesClient()
     with self.assertRaises(CalrissianJobException) as raised_exception:
         kc.get_current_pod()
     self.assertEqual(
         str(raised_exception.exception),
         'Missing required environment variable $CALRISSIAN_POD_NAME')
Example #9
 def test_delete_pod_name_ignores_404(self, mock_get_namespace,
                                      mock_client):
     mock_client.CoreV1Api.return_value.delete_namespaced_pod.side_effect = ApiException(
         status=404)
     kc = KubernetesClient()
     kc.delete_pod_name('pod-123')
     self.assertEqual(
         'pod-123', mock_client.CoreV1Api.return_value.
         delete_namespaced_pod.call_args[0][0])
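Taken together, Examples #1 and #9 pin down the error handling of delete_pod_name: a 404 from the delete call is ignored, while any other ApiException becomes a CalrissianJobException. A minimal method sketch consistent with both tests; the import path, call signature and status check are assumptions rather than the project's actual code:

 from kubernetes.client.rest import ApiException

 def delete_pod_name(self, pod_name):
     # Sketch of a KubernetesClient method: swallow 404s, wrap everything else
     try:
         self.core_api_instance.delete_namespaced_pod(pod_name, self.namespace)
     except ApiException as e:
         if e.status != 404:
             raise CalrissianJobException(
                 'Error deleting pod named {}'.format(pod_name))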
Example #10
 def test_get_current_pod_with_env_var(self, mock_os, mock_get_namespace,
                                       mock_client):
     mock_client.CoreV1Api.return_value.list_namespaced_pod.return_value = Mock(
         items=[{
             'name': 'mypod'
         }])
     mock_os.environ = {'CALRISSIAN_POD_NAME': 'mypod'}
     kc = KubernetesClient()
     pod = kc.get_current_pod()
     self.assertEqual(pod, {'name': 'mypod'})
     mock_client.CoreV1Api.return_value.list_namespaced_pod.assert_called_with(
         mock_get_namespace.return_value,
         field_selector='metadata.name=mypod')
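Examples #6, #8 and #10 together suggest the shape of get_current_pod: read $CALRISSIAN_POD_NAME from the environment, fail loudly if it is missing, then look the pod up by name. A minimal method sketch under that assumption (the error message is taken from the tests, everything else is assumed):

 import os

 def get_current_pod(self):
     # Sketch of a KubernetesClient method
     pod_name = os.environ.get('CALRISSIAN_POD_NAME')
     if not pod_name:
         raise CalrissianJobException(
             'Missing required environment variable $CALRISSIAN_POD_NAME')
     return self.get_pod_for_name(pod_name)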
Example #11
 def test_init(self, mock_get_namespace, mock_client):
     kc = KubernetesClient()
     self.assertEqual(kc.namespace, mock_get_namespace.return_value)
     self.assertEqual(kc.core_api_instance,
                      mock_client.CoreV1Api.return_value)
     self.assertIsNone(kc.pod)
     self.assertIsNone(kc.completion_result)
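A rough sketch of the constructor state this test asserts, assuming client refers to the kubernetes Python client module and get_namespace is a method on the class; any config loading the real __init__ performs is omitted:

 def __init__(self):
     self.namespace = self.get_namespace()
     self.core_api_instance = client.CoreV1Api()
     self.pod = None
     self.completion_result = None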
Example #12
 def test_submit_pod(self, mock_podmonitor, mock_get_namespace,
                     mock_client):
     mock_get_namespace.return_value = 'namespace'
     mock_create_namespaced_pod = Mock()
     mock_create_namespaced_pod.return_value = Mock(metadata=Mock(
         uid='123'))
     mock_client.CoreV1Api.return_value.create_namespaced_pod = mock_create_namespaced_pod
     kc = KubernetesClient()
     mock_body = Mock()
     kc.submit_pod(mock_body)
     self.assertEqual(kc.pod.metadata.uid, '123')
     self.assertEqual(mock_create_namespaced_pod.call_args,
                      call('namespace', mock_body))
     # This is to inspect `with PodMonitor() as monitor`:
     self.assertTrue(
         mock_podmonitor.return_value.__enter__.return_value.add.called)
Example #13
 def test_wait_finishes_when_pod_state_is_terminated(
         self, mock_cpu_memory, mock_podmonitor, mock_watch,
         mock_get_namespace, mock_client):
     mock_pod = create_autospec(V1Pod)
     mock_pod.status.container_statuses[0].state = Mock(
         running=None, waiting=None, terminated=Mock(exit_code=123))
     mock_cpu_memory.return_value = ('1', '1Mi')
     self.setup_mock_watch(mock_watch, [mock_pod])
     kc = KubernetesClient()
     kc._set_pod(Mock())
     completion_result = kc.wait_for_completion()
     self.assertEqual(completion_result.exit_code, 123)
     self.assertTrue(mock_watch.Watch.return_value.stop.called)
     self.assertTrue(
         mock_client.CoreV1Api.return_value.delete_namespaced_pod.called)
     self.assertIsNone(kc.pod)
     # This is to inspect `with PodMonitor() as monitor`:
     self.assertTrue(
         mock_podmonitor.return_value.__enter__.return_value.remove.called)
Example #14
 def test_wait_checks_should_delete_when_pod_state_is_terminated(
         self, mock_cpu_memory, mock_should_delete_pod, mock_watch,
         mock_get_namespace, mock_client):
     mock_pod = create_autospec(V1Pod)
     mock_pod.status.container_statuses[0].state = Mock(
         running=None, waiting=None, terminated=Mock(exit_code=123))
     mock_cpu_memory.return_value = ('1', '1Mi')
     mock_should_delete_pod.return_value = False
     self.setup_mock_watch(mock_watch, [mock_pod])
     kc = KubernetesClient()
     kc._set_pod(Mock())
     completion_result = kc.wait_for_completion()
     self.assertEqual(completion_result.exit_code, 123)
     self.assertEqual(completion_result.memory, '1Mi')
     self.assertEqual(completion_result.cpus, '1')
     self.assertTrue(mock_watch.Watch.return_value.stop.called)
     self.assertFalse(
         mock_client.CoreV1Api.return_value.delete_namespaced_pod.called)
     self.assertIsNone(kc.pod)
Example #15
 def test_follow_logs_streams_to_logging(self, mock_log, mock_get_namespace,
                                         mock_client):
     mock_get_namespace.return_value = 'logging-ns'
     mock_read = mock_client.CoreV1Api.return_value.read_namespaced_pod_log
     mock_read.return_value.stream.return_value = [b'line1\n', b'line2\n']
     mock_pod = self.make_mock_pod('logging-pod-123')
     kc = KubernetesClient()
     kc._set_pod(mock_pod)
     mock_log.reset_mock()  # log will have other calls before calling follow_logs()
     kc.follow_logs()
     self.assertTrue(mock_read.called)
     self.assertEqual(
         mock_read.call_args,
         call('logging-pod-123',
              'logging-ns',
              follow=True,
              _preload_content=False))
     self.assertEqual(
         mock_log.debug.mock_calls,
         [call('[logging-pod-123] line1'),
          call('[logging-pod-123] line2')])
     self.assertEqual(mock_log.info.mock_calls, [
         call('[logging-pod-123] follow_logs start'),
         call('[logging-pod-123] follow_logs end')
     ])
Example #16
 def test_raises_on_set_second_pod(self, mock_get_namespace, mock_client):
     kc = KubernetesClient()
     kc._set_pod(Mock())
     with self.assertRaises(CalrissianJobException) as context:
         kc._set_pod(Mock())
     self.assertIn('This client is already observing pod',
                   str(context.exception))
Example #17
 def test_wait_skips_pod_when_status_is_none(self, mock_watch,
                                             mock_get_namespace,
                                             mock_client):
     mock_pod = Mock(status=Mock(container_statuses=None))
     self.setup_mock_watch(mock_watch, [mock_pod])
     kc = KubernetesClient()
     kc._set_pod(Mock())
     kc.wait_for_completion()
     self.assertFalse(mock_watch.Watch.return_value.stop.called)
     self.assertFalse(
         mock_client.CoreV1Api.return_value.delete_namespaced_pod.called)
     self.assertIsNotNone(kc.pod)
Example #18
 def test_wait_raises_exception_when_state_is_unexpected(
         self, mock_watch, mock_get_namespace, mock_client):
     mock_pod = create_autospec(V1Pod)
     mock_pod.status.container_statuses[0].state = Mock(running=None,
                                                        waiting=None,
                                                        terminated=None)
     self.setup_mock_watch(mock_watch, [mock_pod])
     kc = KubernetesClient()
     kc._set_pod(Mock())
     with self.assertRaisesRegex(CalrissianJobException,
                                 'Unexpected pod container status'):
         kc.wait_for_completion()
Example #19
 def test_wait_calls_watch_pod_with_pod_name_field_selector(
         self, mock_watch, mock_get_namespace, mock_client):
     self.setup_mock_watch(mock_watch)
     mock_pod = self.make_mock_pod('test123')
     kc = KubernetesClient()
     kc._set_pod(mock_pod)
     kc.wait_for_completion()
     mock_stream = mock_watch.Watch.return_value.stream
     self.assertEqual(
         mock_stream.call_args,
         call(kc.core_api_instance.list_namespaced_pod,
              kc.namespace,
              field_selector='metadata.name=test123'))
Example #20
 def test_wait_skips_pod_when_state_is_waiting(self, mock_watch,
                                               mock_get_namespace,
                                               mock_client):
     mock_pod = create_autospec(V1Pod)
     mock_pod.status.container_statuses[0].state = Mock(running=None,
                                                        waiting=True,
                                                        terminated=None)
     self.setup_mock_watch(mock_watch, [mock_pod])
     kc = KubernetesClient()
     kc._set_pod(Mock())
     kc.wait_for_completion()
     self.assertFalse(mock_watch.Watch.return_value.stop.called)
     self.assertFalse(
         mock_client.CoreV1Api.return_value.delete_namespaced_pod.called)
     self.assertIsNotNone(kc.pod)
Example #21
 def test_delete_pod_name_calls_api(self, mock_get_namespace, mock_client):
     kc = KubernetesClient()
     kc.delete_pod_name('pod-123')
     self.assertEqual(
         'pod-123', mock_client.CoreV1Api.return_value.
         delete_namespaced_pod.call_args[0][0])
Example #22
 def test_raises_on_set_second_pod(self, mock_get_namespace, mock_client):
     kc = KubernetesClient()
     kc._set_pod(Mock())
     with self.assertRaisesRegex(CalrissianJobException,
                                 'This client is already observing pod'):
         kc._set_pod(Mock())
Example #23
 def test_get_pod_for_name_one_found(self, mock_get_namespace, mock_client):
     mock_client.CoreV1Api.return_value.list_namespaced_pod.return_value = Mock(
         items=['pod1'])
     kc = KubernetesClient()
     pod = kc.get_pod_for_name('somepod')
     self.assertEqual(pod, 'pod1')
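Examples #5, #7 and #23 fix the contract of get_pod_for_name: zero matches raise, multiple matches raise, and exactly one match is returned. A minimal method sketch consistent with those tests; the field_selector format is borrowed from Example #10 and the rest is assumed:

 def get_pod_for_name(self, pod_name):
     pod_list = self.core_api_instance.list_namespaced_pod(
         self.namespace, field_selector='metadata.name={}'.format(pod_name))
     if not pod_list.items:
         raise CalrissianJobException(
             'Unable to find pod with name {}'.format(pod_name))
     if len(pod_list.items) > 1:
         raise CalrissianJobException(
             'Multiple pods found with name {}'.format(pod_name))
     return pod_list.items[0]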
Example #24
 def test_multiple_statuses_raises(self):
     self.assertEqual(len(self.multiple_statuses), 2)
     with self.assertRaisesRegex(CalrissianJobException,
                                 'Expected 0 or 1 containers, found 2'):
         KubernetesClient.get_first_or_none(self.multiple_statuses)
Example #25
 def test_singular_status(self):
     self.assertEqual(len(self.singular_status), 1)
     self.assertIsNotNone(
         KubernetesClient.get_first_or_none(self.singular_status))
Example #26
 def test_none_statuses(self):
     self.assertIsNone(
         KubernetesClient.get_first_or_none(self.none_statuses))
     self.assertIsNone(
         KubernetesClient.get_first_or_none(self.empty_list_statuses))
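Examples #24, #25, #26 and #28 document get_first_or_none as a small guard around a list of container statuses: None or an empty list yields None, a single status is returned, and anything longer raises. A static-method sketch of that behavior; only the exception message is taken verbatim from the tests:

 @staticmethod
 def get_first_or_none(container_statuses):
     # None or [] -> None; [status] -> status; more than one -> error
     if not container_statuses:
         return None
     if len(container_statuses) > 1:
         raise CalrissianJobException(
             'Expected 0 or 1 containers, found {}'.format(len(container_statuses)))
     return container_statuses[0]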
Example #27
File: job.py Project: x-native/calrissian
class CalrissianCommandLineJob(ContainerCommandLineJob):

    container_tmpdir = '/tmp'

    def __init__(self, *args, **kwargs):
        super(CalrissianCommandLineJob, self).__init__(*args, **kwargs)
        self.client = KubernetesClient()
        volume_builder = KubernetesVolumeBuilder()
        volume_builder.add_persistent_volume_entries_from_pod(
            self.client.get_current_pod())
        self.volume_builder = volume_builder

    def make_tmpdir(self):
        # Doing this because cwltool.job does it
        if not os.path.exists(self.tmpdir):
            log.debug('os.makedirs({})'.format(self.tmpdir))
            os.makedirs(self.tmpdir)

    def populate_env_vars(self):
        # cwltool DockerCommandLineJob always sets HOME to self.builder.outdir
        # https://github.com/common-workflow-language/cwltool/blob/1.0.20181201184214/cwltool/docker.py#L338
        self.environment["HOME"] = self.builder.outdir
        # cwltool DockerCommandLineJob always sets TMPDIR to /tmp
        # https://github.com/common-workflow-language/cwltool/blob/1.0.20181201184214/cwltool/docker.py#L333
        self.environment["TMPDIR"] = self.container_tmpdir

    def wait_for_kubernetes_pod(self):
        return self.client.wait_for_completion()

    def report(self, completion_result, disk_bytes):
        """
        Convert the k8s-specific completion result into a report and submit it
        :param completion_result: calrissian.k8s.CompletionResult
        :param disk_bytes: total size in bytes of the collected outputs
        """
        report = TimedResourceReport.create(self.name, completion_result,
                                            disk_bytes)
        Reporter.add_report(report)

    def finish(self, completion_result, runtimeContext):
        exit_code = completion_result.exit_code
        if exit_code in self.successCodes:
            status = "success"
        elif exit_code in self.temporaryFailCodes:
            status = "temporaryFail"
        elif exit_code in self.permanentFailCodes:
            status = "permanentFail"
        elif exit_code == 0:
            status = "success"
        else:
            status = "permanentFail"
        # collect_outputs (and collect_output) is defined in command_line_tool
        outputs = self.collect_outputs(self.outdir, exit_code)

        disk_bytes = total_size(outputs)
        self.report(completion_result, disk_bytes)

        # Invoke the callback with a lock
        with runtimeContext.workflow_eval_lock:
            self.output_callback(outputs, status)

        # Cleanup our stagedir and tmp
        if self.stagedir is not None and os.path.exists(self.stagedir):
            log.debug('shutil.rmtree({}, {})'.format(self.stagedir, True))
            shutil.rmtree(self.stagedir, True)

        if runtimeContext.rm_tmpdir:
            log.debug('shutil.rmtree({}, {})'.format(self.tmpdir, True))
            shutil.rmtree(self.tmpdir, True)

    # Dictionary of supported features.
    # Not yet complete, only checks features of DockerRequirement
    supported_features = {'DockerRequirement': ['class', 'dockerPull']}

    def check_requirements(self):
        for feature in self.supported_features:
            requirement, is_required = self.get_requirement(feature)
            if requirement and is_required:
                for field in requirement:
                    if field not in self.supported_features[feature]:
                        raise UnsupportedRequirement(
                            'Error: feature {}.{} is not supported'.format(
                                feature, field))

    def _get_container_image(self):
        docker_requirement, _ = self.get_requirement('DockerRequirement')
        if docker_requirement:
            container_image = docker_requirement['dockerPull']
        else:
            # No dockerRequirement, use the default container
            container_image = self.builder.find_default_container()
        if not container_image:
            raise CalrissianCommandLineJobException(
                'Unable to create Job - Please ensure tool has a DockerRequirement with dockerPull or specify a default_container'
            )
        return container_image

    def quoted_command_line(self):
        return quoted_arg_list(self.command_line)

    def get_pod_labels(self, runtimeContext):
        if runtimeContext.pod_labels:
            return read_yaml(runtimeContext.pod_labels)
        else:
            return {}

    def create_kubernetes_runtime(self, runtimeContext):
        # In cwltool, the runtime list starts as something like ['docker','run'] and these various builder methods
        # append to that list with docker (or singularity) options like volume mount paths
        # As we build up kubernetes, these aren't really used this way so we leave it empty
        runtime = []

        # Append volume for outdir
        self._add_volume_binding(os.path.realpath(self.outdir),
                                 self.builder.outdir,
                                 writable=True)
        # Use a kubernetes emptyDir: {} volume for /tmp
        # Note that the add_volumes() call below may result in other temporary files being
        # mounted from the calrissian host's tmpdir prefix into an absolute container path,
        # but this will not conflict with '/tmp' as an emptyDir
        self._add_emptydir_volume_and_binding('tmpdir', self.container_tmpdir)

        # Call the ContainerCommandLineJob add_volumes method
        self.add_volumes(self.pathmapper,
                         runtime,
                         tmpdir_prefix=runtimeContext.tmpdir_prefix,
                         secret_store=runtimeContext.secret_store,
                         any_path_okay=True)

        if self.generatemapper is not None:
            # any_path_okay is True when DockerRequirement is a hard requirement;
            # it is only used for the generatemapper add_volumes() call below
            any_path_okay = self.builder.get_requirement(
                "DockerRequirement")[1] or False
            self.add_volumes(self.generatemapper,
                             runtime,
                             tmpdir_prefix=runtimeContext.tmpdir_prefix,
                             secret_store=runtimeContext.secret_store,
                             any_path_okay=any_path_okay)

        pod_labels = self.get_pod_labels(runtimeContext)

        k8s_builder = KubernetesPodBuilder(
            self.name,
            self._get_container_image(),
            self.environment,
            self.volume_builder.volume_mounts,
            self.volume_builder.volumes,
            self.quoted_command_line(),
            self.stdout,
            self.stderr,
            self.stdin,
            self.builder.resources,
            pod_labels,
        )
        built = k8s_builder.build()
        log.debug('{}\n{}{}\n'.format('-' * 80, yaml.dump(built), '-' * 80))
        # Report an error if anything was added to the runtime list
        if runtime:
            log.error(
                'Runtime list is not empty. k8s does not use that, so you should see who put something there:\n{}'
                .format(' '.join(runtime)))
        return built

    def execute_kubernetes_pod(self, pod):
        self.client.submit_pod(pod)

    def _add_emptydir_volume_and_binding(self, name, target):
        self.volume_builder.add_emptydir_volume(name)
        self.volume_builder.add_emptydir_volume_binding(name, target)

    def _add_volume_binding(self, source, target, writable=False):
        self.volume_builder.add_volume_binding(source, target, writable)

    # Below are concrete implementations of methods called by add_volumes
    # They are based on https://github.com/common-workflow-language/cwltool/blob/1.0.20181201184214/cwltool/docker.py
    # But the key difference is that docker is invoked via command-line, so the ones in docker.py append to
    # a runtime list. Here, we instead call self._add_volume_binding()

    def add_file_or_directory_volume(
            self,
            runtime,  # type: List[Text]
            volume,  # type: MapperEnt
            host_outdir_tgt  # type: Optional[Text]
    ):
        """Append volume a file/dir mapping to the runtime option list."""
        if not volume.resolved.startswith("_:"):
            self._add_volume_binding(
                volume.resolved,
                volume.target)  # this one defaults to read_only

    def add_writable_file_volume(
            self,
            runtime,  # type: List[Text]
            volume,  # type: MapperEnt
            host_outdir_tgt,  # type: Optional[Text]
            tmpdir_prefix  # type: Text
    ):
        """Append a writable file mapping to the runtime option list."""
        if self.inplace_update:
            self._add_volume_binding(volume.resolved,
                                     volume.target,
                                     writable=True)
        else:
            if host_outdir_tgt:
                # shortcut, just copy to the output directory
                # which is already going to be mounted
                log.debug('shutil.copy({}, {})'.format(volume.resolved,
                                                       host_outdir_tgt))
                shutil.copy(volume.resolved, host_outdir_tgt)
            else:
                log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))
                tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
                file_copy = os.path.join(tmpdir,
                                         os.path.basename(volume.resolved))
                log.debug('shutil.copy({}, {})'.format(volume.resolved,
                                                       file_copy))
                shutil.copy(volume.resolved, file_copy)
                self._add_volume_binding(file_copy,
                                         volume.target,
                                         writable=True)
            ensure_writable(host_outdir_tgt or file_copy)

    def add_writable_directory_volume(
            self,
            runtime,  # type: List[Text]
            volume,  # type: MapperEnt
            host_outdir_tgt,  # type: Optional[Text]
            tmpdir_prefix  # type: Text
    ):
        """Append a writable directory mapping to the runtime option list."""
        if volume.resolved.startswith("_:"):
            # Synthetic directory that needs creating first
            if not host_outdir_tgt:
                log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))
                new_dir = os.path.join(tempfile.mkdtemp(dir=self.tmpdir),
                                       os.path.basename(volume.target))
                self._add_volume_binding(new_dir, volume.target, writable=True)
            elif not os.path.exists(host_outdir_tgt):
                log.debug('os.makedirs({}, 0o0755)'.format(host_outdir_tgt))
                os.makedirs(host_outdir_tgt, 0o0755)
        else:
            if self.inplace_update:
                self._add_volume_binding(volume.resolved,
                                         volume.target,
                                         writable=True)
            else:
                if not host_outdir_tgt:
                    log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))
                    tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
                    new_dir = os.path.join(tmpdir,
                                           os.path.basename(volume.resolved))
                    log.debug('shutil.copytree({}, {})'.format(
                        volume.resolved, new_dir))
                    shutil.copytree(volume.resolved, new_dir)
                    self._add_volume_binding(new_dir,
                                             volume.target,
                                             writable=True)
                else:
                    log.debug('shutil.copytree({}, {})'.format(
                        volume.resolved, host_outdir_tgt))
                    shutil.copytree(volume.resolved, host_outdir_tgt)
                ensure_writable(host_outdir_tgt or new_dir)

    def run(self, runtimeContext, tmpdir_lock=None):
        self.check_requirements()
        if tmpdir_lock:
            with tmpdir_lock:
                self.make_tmpdir()
        else:
            self.make_tmpdir()
        self.populate_env_vars()
        self._setup(runtimeContext)
        pod = self.create_kubernetes_runtime(
            runtimeContext)  # analogous to create_runtime()
        self.execute_kubernetes_pod(pod)  # analogous to _execute()
        completion_result = self.wait_for_kubernetes_pod()
        self.finish(completion_result, runtimeContext)

    # Below are concrete implementations of the remaining abstract methods in ContainerCommandLineJob
    # They are not implemented and not expected to be called, so they all raise NotImplementedError

    def get_from_requirements(
            self,
            r,  # type: Dict[Text, Text]
            pull_image,  # type: bool
            force_pull=False,  # type: bool
            tmp_outdir_prefix=DEFAULT_TMP_PREFIX  # type: Text
    ):
        raise NotImplementedError('get_from_requirements')

    def create_runtime(
        self,
        env,  # type: MutableMapping[Text, Text]
        runtime_context  # type: RuntimeContext
    ):
        # expected to return runtime list and cid string
        raise NotImplementedError('create_runtime')

    def append_volume(self, runtime, source, target, writable=False):
        """Add volume binding to the arguments list.
        This is called by the base class for file literals after they've been created.
        We already have a similar function, so we just call that.
        """
        self._add_volume_binding(source, target, writable)
Example #28
 def test_multiple_statuses_raises(self):
     self.assertEqual(len(self.multiple_statuses), 2)
     with self.assertRaises(CalrissianJobException) as context:
         KubernetesClient.get_first_or_none(self.multiple_statuses)
     self.assertIn('Expected 0 or 1 containers, found 2',
                   str(context.exception))