def test_is_candidate_for_deletion(self):
    deleter = JobDeleter(Mock())
    # No annotation
    assert not deleter.is_candidate_for_deletion(V1Job(metadata=V1ObjectMeta()))
    assert not deleter.is_candidate_for_deletion(
        V1Job(metadata=V1ObjectMeta(annotations={}))
    )
    # Wayyyy in the past
    assert deleter.is_candidate_for_deletion(
        V1Job(
            metadata=V1ObjectMeta(
                annotations={JobDeleter.JOB_DELETION_TIME_ANNOTATION: 0}
            )
        )
    )
    # Far in the future
    assert not deleter.is_candidate_for_deletion(
        V1Job(
            metadata=V1ObjectMeta(
                annotations={
                    JobDeleter.JOB_DELETION_TIME_ANNOTATION: int(time.time() + 10000)
                }
            )
        )
    )
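# A minimal sketch (an assumption, not the project's actual code) of what
# JobDeleter.is_candidate_for_deletion could look like, inferred from the
# assertions above: a missing annotation means "keep", and a deletion
# timestamp at or before now means "delete".
def is_candidate_for_deletion(self, job: V1Job) -> bool:
    annotations = job.metadata.annotations or {}
    deletion_time = annotations.get(self.JOB_DELETION_TIME_ANNOTATION)
    if deletion_time is None:
        return False  # never annotated, so never a candidate
    return int(deletion_time) <= time.time()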
def test_job_is_finished(self):
    manager = JobManager(namespace="xyz", signer=Mock(), register=Mock())
    job = V1Job(status=V1JobStatus(conditions=[]))
    assert not manager.job_is_finished(job)

    job = V1Job(status=V1JobStatus(conditions=[], completion_time=datetime.now()))
    assert not manager.job_is_finished(job), "Completion time field is unchecked"

    job = V1Job(
        status=V1JobStatus(conditions=[V1JobCondition(status="True", type="Complete")])
    )
    assert manager.job_is_finished(job), "A complete job should be finished"

    job = V1Job(
        status=V1JobStatus(conditions=[V1JobCondition(status="False", type="Complete")])
    )
    assert not manager.job_is_finished(
        job
    ), "False job status conditions should be ignored"

    job = V1Job(
        status=V1JobStatus(conditions=[V1JobCondition(status="True", type="Failed")])
    )
    assert manager.job_is_finished(job), "A failed job is finished"
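# A hedged sketch of JobManager.job_is_finished consistent with the test
# above (not necessarily the real implementation): only conditions with
# status "True" and a terminal type count; completion_time alone is ignored.
def job_is_finished(self, job: V1Job) -> bool:
    conditions = (job.status and job.status.conditions) or []
    return any(
        condition.status == "True" and condition.type in ("Complete", "Failed")
        for condition in conditions
    )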
def test_main_failed_pods(mocker, main_mock):
    job_status_history = [
        V1Job(status=V1JobStatus(active=1, succeeded=1)),
        V1Job(status=V1JobStatus(active=1, succeeded=1)),
        V1Job(status=V1JobStatus(active=0, succeeded=1, failed=1)),
    ]
    mocker.patch.object(
        main_mock.kubernetes_client_mock, 'read_namespaced_job'
    ).side_effect = job_status_history

    main()

    assert main_mock.kubernetes_config_load_mock.call_count == 1
    assert main_mock.builtins_open_mock.call_count == 2
    assert main_mock.time_sleep_mock.call_count == len(job_status_history) - 1
def test_generate_with_dict_config(self):
    job = V1Job(metadata=V1ObjectMeta(name="iloveyouabushelandapeck"))
    generator = JobGenerator(StaticSpecSource(job.to_dict()))
    j = generator.generate()
    assert j["metadata"]["name"] != job.metadata.name, "Should have mutated job name"
def test_job_to_dict():
    j = V1Job(api_version='abc', kind='foo')
    d = util.job_to_dict(j)
    assert d is not None
    assert isinstance(d, dict)
    assert d == ApiClient().sanitize_for_serialization(j)
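# A plausible implementation of util.job_to_dict, assuming it delegates to
# the official client's serializer (the final equality assertion above
# implies exactly this behavior):
from kubernetes.client import ApiClient

def job_to_dict(job: V1Job) -> dict:
    # sanitize_for_serialization converts the model into plain JSON types
    return ApiClient().sanitize_for_serialization(job)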
def test__handle_jobs_raises_error():
    statuses = [complete_status, inprogress_status, failed_status]
    jobs = [
        V1Job(metadata=V1ObjectMeta(name=str(k)), status=status)
        for k, status in enumerate(statuses)
    ]
    with pytest.raises(ValueError):
        _handle_jobs(jobs, True)
def test_long_name(self):
    mock_config_source = Mock()
    mock_config_source.get.return_value = V1Job(
        metadata=V1ObjectMeta(name="thisisanextremelylongnamethathasalotofcharacters")
    )
    generator = JobGenerator(mock_config_source)
    job = generator.generate()
    assert len(job.metadata.name) == 63
def test_unique_names(self):
    generator = JobGenerator(
        StaticSpecSource(V1Job(metadata=V1ObjectMeta(name="iloveyouabushelandapeck")))
    )
    j1 = generator.generate()
    j2 = generator.generate()
    assert j1.metadata.name != j2.metadata.name, "Each generated job must have a unique name"
def test_generate_with_template_args(self):
    mock_config_source = Mock()
    mock_config_source.get.return_value = V1Job(metadata=V1ObjectMeta(name="anotherone"))
    generator = JobGenerator(mock_config_source)
    template_args = {"foo": "bar"}
    generator.generate(template_args=template_args)
    mock_config_source.get.assert_called_once_with(template_args=template_args)
def test_short_name(self):
    mock_config_source = Mock()
    mock_config_source.get.return_value = V1Job(
        metadata=V1ObjectMeta(name="shortname")
    ).to_dict()
    generator = JobGenerator(mock_config_source)
    job = generator.generate()
    assert job["metadata"]["name"].startswith("shortname-")
    assert len(job["metadata"]["name"]) == 9 + 1 + 2 * JobGenerator.SUFFIX_BYTES
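# A hedged sketch of the name mutation the naming tests above imply (the
# real JobGenerator may differ): append a random hex suffix of SUFFIX_BYTES
# bytes and truncate so the result fits Kubernetes' 63-character name limit.
import secrets

MAX_NAME_LENGTH = 63  # assumption: the DNS-label limit Kubernetes enforces

def _unique_name(base: str, suffix_bytes: int) -> str:
    suffix = secrets.token_hex(suffix_bytes)  # 2 * suffix_bytes hex chars
    base = base[: MAX_NAME_LENGTH - len(suffix) - 1]
    return f"{base}-{suffix}"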
def _create_flush_job(
    batch_api: BatchV1Api,
    command: List[str],
    env: List[V1EnvVar],
    image: str,
    name: str,
    namespace: str,
    service_account_name: str,
) -> V1Job:
    logger.info(f"creating job: {name}")
    try:
        return batch_api.create_namespaced_job(
            namespace=namespace,
            body=V1Job(
                api_version="batch/v1",
                kind="Job",
                metadata=V1ObjectMeta(name=name, namespace=namespace),
                spec=V1JobSpec(
                    template=V1PodTemplateSpec(
                        spec=V1PodSpec(
                            containers=[
                                V1Container(
                                    image=image,
                                    command=command,
                                    name="flush",
                                    volume_mounts=[
                                        V1VolumeMount(mount_path="/data", name="queue")
                                    ],
                                    env=env,
                                )
                            ],
                            restart_policy="OnFailure",
                            volumes=[
                                V1Volume(
                                    name="queue",
                                    persistent_volume_claim=(
                                        V1PersistentVolumeClaimVolumeSource(
                                            claim_name=name
                                        )
                                    ),
                                )
                            ],
                            service_account_name=service_account_name,
                        )
                    )
                ),
            ),
        )
    except ApiException as e:
        if e.reason == CONFLICT and json.loads(e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing job: {name}")
            return batch_api.read_namespaced_job(name, namespace)
        raise
def test_fetch_jobs_continue(self, mock_batch_client):
    _continue = "xyz"
    mock_batch_client.list_namespaced_job.side_effect = [
        V1JobList(
            items=[V1Job(metadata=V1ObjectMeta(name="1"))],
            metadata=V1ListMeta(_continue=_continue),
        ),
        V1JobList(
            items=[V1Job(metadata=V1ObjectMeta(name="2"))], metadata=V1ListMeta()
        ),
    ]
    namespace = "blech"
    manager = JobManager(
        namespace=namespace, signer=Mock(), register=StaticJobDefinitionsRegister()
    )

    assert len(list(manager.fetch_jobs())) == 2
    assert mock_batch_client.list_namespaced_job.call_count == 2
    mock_batch_client.list_namespaced_job.assert_called_with(
        namespace=namespace, _continue=_continue
    )
def test_delete_job(self, mock_batch_client):
    namespace = "whee"
    name = "jobname"
    manager = JobManager(
        namespace=namespace, signer=Mock(), register=StaticJobDefinitionsRegister()
    )

    manager.delete_job(V1Job(metadata=V1ObjectMeta(name=name)))
    mock_batch_client.delete_namespaced_job.assert_called_once_with(
        name=name,
        namespace=namespace,
        body=V1DeleteOptions(propagation_policy="Foreground"),
    )
def test_fetch_jobs_filters(self, mock_batch_client):
    mock_batch_client.list_namespaced_job.return_value = V1JobList(
        items=[V1Job(metadata=V1ObjectMeta(name="1"))], metadata=V1ListMeta()
    )
    namespace = "hellomoto"
    signer = JobSigner("foo")
    manager = JobManager(
        namespace=namespace, signer=signer, register=StaticJobDefinitionsRegister()
    )

    assert len(list(manager.fetch_jobs(extra="filter"))) == 1
    mock_batch_client.list_namespaced_job.assert_called_once_with(
        namespace=namespace, label_selector=signer.label_selector(extra="filter")
    )
def test_mark_deletion_time_existing_annotation(self, mock_batch_client):
    name = "deletionjobalreadyannotated"
    namespace = "xyzabc"
    manager = Mock(namespace=namespace)
    deleter = JobDeleter(manager)

    job = V1Job(
        metadata=V1ObjectMeta(
            name=name, annotations={JobDeleter.JOB_DELETION_TIME_ANNOTATION: 0}
        )
    )
    deleter.mark_deletion_time(job, 0)
    mock_batch_client.patch_namespaced_job.assert_not_called()
def test_mark_deletion_time(self, mock_batch_client):
    name = "deletionjob"
    namespace = "abcxyz"
    manager = Mock(namespace=namespace)
    deleter = JobDeleter(manager)

    job = V1Job(metadata=V1ObjectMeta(name=name, annotations={}))
    deleter.mark_deletion_time(job, 3600)
    mock_batch_client.patch_namespaced_job.assert_called_once_with(
        name=name, namespace=namespace, body=ANY
    )
    deletion_time_1 = mock_batch_client.patch_namespaced_job.call_args[1][
        "body"
    ].metadata.annotations[deleter.JOB_DELETION_TIME_ANNOTATION]

    mock_batch_client.reset_mock()
    job = V1Job(metadata=V1ObjectMeta(name=name, annotations={}))
    deleter.mark_deletion_time(job, 0)
    deletion_time_2 = mock_batch_client.patch_namespaced_job.call_args[1][
        "body"
    ].metadata.annotations[deleter.JOB_DELETION_TIME_ANNOTATION]

    assert int(deletion_time_1) > int(deletion_time_2)
def create_v1job(
    cls,
    job_spec: JobSpec,
    name: str,
    labels: Optional[Dict[str, str]] = None,
) -> V1Job:
    '''creates a V1Job from a JobSpec, a job name, and an optional set of labels'''
    name = utils.sanitize_job_name(name)
    job_metadata = V1ObjectMeta(generate_name=name + '-', labels=labels)

    return V1Job(
        api_version=k.BATCH_V1_VERSION,
        kind='Job',
        metadata=job_metadata,
        spec=job_spec.spec,
    )
def test_create_job_with_template(self, mock_batch_client):
    mock_batch_client.create_namespaced_job.return_value = V1Job(
        metadata=V1ObjectMeta()
    )
    job_name = "job"
    mock_generator = Mock()
    manager = JobManager(
        namespace="geerick",
        signer=Mock(),
        register=StaticJobDefinitionsRegister({job_name: mock_generator}),
    )

    template_args = {"dummy": "template"}
    manager.create_job(job_name, template_args=template_args)
    mock_generator.generate.assert_called_once_with(template_args=template_args)
def test_info(self, batch_p):
    batch_p.return_value = V1Job(
        metadata=V1ObjectMeta(name='id-cate'),
        status=V1Status(message='Ganz blöd', status=100),
    )
    res = cubegens.status('id')
    batch_p.assert_called_once()
    self.assertEqual(100, res['status'])

    batch_p.side_effect = ApiValueError(
        "Missing the required parameter `namespace` "
        "when calling `read_namespaced_job_status`"
    )
    res = cubegens.status('id')
    self.assertDictEqual({}, res)
def test_sets_label_job(self):
    signature = "hehehe"
    signer = JobSigner(signature)
    job = V1Job(metadata=V1ObjectMeta())

    signer.sign(job)
    assert (
        job.metadata.labels[JobSigner.LABEL_KEY] == signature
    ), "Metadata label not set"

    job_definition_name = "funfun"
    signer.sign(job, job_definition_name)
    assert (
        job.metadata.labels[JobSigner.JOB_DEFINITION_NAME_KEY] == job_definition_name
    ), "Job Definition label not set"
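# A minimal sketch of a JobSigner.sign that would satisfy the assertions
# above; attribute names beyond the class constants (e.g. self.signature)
# are assumptions, not the project's actual code.
def sign(self, job: V1Job, job_definition_name: str = None) -> None:
    if job.metadata.labels is None:
        job.metadata.labels = {}
    job.metadata.labels[self.LABEL_KEY] = self.signature
    if job_definition_name is not None:
        job.metadata.labels[self.JOB_DEFINITION_NAME_KEY] = job_definition_name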
def _create_kube_job(self, op_inst, podspec, namespace=KubernetesConfig.K8S_NAMESPACE):
    job_name = op_inst.guid + "-job"
    job_metadata = client.V1ObjectMeta(
        name=job_name,
        namespace=namespace,
        # Label for the service to bind to
        labels={KubernetesConfig.K8S_LABELS_OPGUID: op_inst.guid},
    )
    pod_name = op_inst.guid + "-pod"
    pod_metadata = client.V1ObjectMeta(
        name=pod_name,
        namespace=namespace,
        # Label for the service to bind to
        labels={KubernetesConfig.K8S_LABELS_OPGUID: op_inst.guid},
    )
    jobspec = V1JobSpec(
        template=V1PodTemplateSpec(metadata=pod_metadata, spec=podspec)
    )
    kube_job = V1Job(metadata=job_metadata, spec=jobspec)
    return kube_job
def test_create_job(self, mock_batch_client):
    mock_batch_client.create_namespaced_job.return_value = V1Job(
        metadata=V1ObjectMeta()
    )
    namespace = "hellomoto"
    g1 = Mock()
    g2 = Mock()
    manager = JobManager(
        namespace=namespace,
        signer=Mock(),
        register=StaticJobDefinitionsRegister({"g1": g1, "g2": g2}),
    )

    manager.create_job("g2")
    g1.assert_not_called()
    g2.generate.assert_called_once()
    mock_batch_client.create_namespaced_job.assert_called_once_with(
        namespace=namespace, body=ANY
    )
def test_delete_complete_jobs_raises_server_error(api: MagicMock, batch_api: MagicMock):
    batch_api.list_namespaced_job.return_value = V1JobList(
        items=[
            # delete because complete
            V1Job(
                metadata=V1ObjectMeta(
                    name="flush-pv-1", uid="uid-flush-pv-1", resource_version="1"
                ),
                status=V1JobStatus(
                    conditions=[V1JobCondition(status="", type="Complete")]
                ),
            ),
        ]
    )

    def delete_job(name, namespace, body):
        raise ApiException(reason="Server Error")

    batch_api.delete_namespaced_job.side_effect = delete_job

    with pytest.raises(ApiException):
        delete_complete_jobs(api, batch_api, "namespace")
    # must be assert_called_once_with; the bare called_once_with is a silent no-op
    batch_api.list_namespaced_job.assert_called_once_with("namespace")
def create_job(self, job_metadata: V1ObjectMeta, job_spec: V1JobSpec) -> V1Job:
    """Create a Kubernetes job.

    :return: The V1Job object representing the created job.
    """
    job_request = V1Job(
        api_version='batch/v1',
        kind='Job',
        metadata=job_metadata,
        spec=job_spec,
    )

    _load_kube_config2(print_output=self.print_output)

    try:
        batch_api = client.BatchV1Api()
        job: V1Job = batch_api.create_namespaced_job(
            namespace=self.namespace, body=job_request
        )
    except ApiException as error:
        raise APIConnectionError(error)

    return job
def test_export_job():
    with tempfile.TemporaryDirectory() as tmpdir:
        j = V1Job(api_version='abc', kind='foo')
        nnd = util.nonnull_dict(util.job_to_dict(j))

        fname = os.path.join(tmpdir, 'foo.json')
        assert util.export_job(j, fname)
        assert os.path.exists(fname)
        with open(fname, 'r') as f:
            x = json.load(f)
        assert x == nnd

        fname = os.path.join(tmpdir, 'foo.yaml')
        assert util.export_job(j, fname)
        assert os.path.exists(fname)
        with open(fname, 'r') as f:
            x = yaml.safe_load(f)  # yaml.load without an explicit Loader is deprecated
        assert x == nnd

        fname = os.path.join(tmpdir, 'foo.xyz')
        assert not util.export_job(j, fname)
def main_mock(mocker):
    mocker.patch('os.path.isfile').return_value = False
    kubernetes_config_load = mocker.patch('kubernetes.config.load_incluster_config')
    mocker.patch('main.getenv').return_value = "fake_job_name"

    file_mock = mocker.MagicMock(read=lambda: 'fake-namespace')
    open_mock = mocker.MagicMock(__enter__=lambda x: file_mock)
    builtins_open = mocker.patch('builtins.open')
    builtins_open.return_value = open_mock

    main_sleep = mocker.patch('main.sleep')

    fake_k8s_client = mocker.MagicMock()
    mocker.patch('kubernetes.client.BatchV1Api').return_value = fake_k8s_client
    mocker.patch.object(fake_k8s_client, 'read_namespaced_job').return_value = V1Job(
        status=V1JobStatus(active=0, succeeded=1)
    )

    class MainMock:
        kubernetes_config_load_mock = kubernetes_config_load
        builtins_open_mock = builtins_open
        kubernetes_client_mock = fake_k8s_client
        time_sleep_mock = main_sleep

    return MainMock
def test_job_status():
    for s in JobStatus:
        terminal = s.is_terminal()
        if s.name in ['FAILED', 'SUCCEEDED', 'UNAVAILABLE']:
            assert terminal
        else:
            assert not terminal

    # completed jobs
    status = V1JobStatus(completion_time=datetime.now(), succeeded=1)
    job_info = V1Job(status=status)
    job_status = JobStatus.from_job_info(job_info)
    assert job_status == JobStatus.SUCCEEDED

    status = V1JobStatus(completion_time=datetime.now(), succeeded=0)
    job_info = V1Job(status=status)
    job_status = JobStatus.from_job_info(job_info)
    assert job_status == JobStatus.FAILED

    # active jobs
    status = V1JobStatus(completion_time=None, active=1)
    job_info = V1Job(status=status)
    job_status = JobStatus.from_job_info(job_info)
    assert job_status == JobStatus.RUNNING

    # pending jobs
    status = V1JobStatus(completion_time=None, active=0)
    job_info = V1Job(status=status)
    job_status = JobStatus.from_job_info(job_info)
    assert job_status == JobStatus.PENDING

    # unknown state
    status = V1JobStatus()
    job_info = V1Job(status=status)
    job_status = JobStatus.from_job_info(job_info)
    assert job_status == JobStatus.STATE_UNSPECIFIED

    job_info = V1Job()
    job_status = JobStatus.from_job_info(job_info)
    assert job_status == JobStatus.STATE_UNSPECIFIED

    job_status = JobStatus.from_job_info(None)
    assert job_status == JobStatus.STATE_UNSPECIFIED
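# A hedged reconstruction of the JobStatus.from_job_info decision table the
# test above walks through; only the mapping is taken from the assertions,
# the rest is an assumption about the real code.
@classmethod
def from_job_info(cls, job_info: V1Job) -> 'JobStatus':
    if job_info is None or job_info.status is None:
        return cls.STATE_UNSPECIFIED
    status = job_info.status
    if status.completion_time is not None:
        return cls.SUCCEEDED if status.succeeded else cls.FAILED
    if status.active:  # at least one pod still running
        return cls.RUNNING
    if status.active == 0:  # scheduled but no pods active yet
        return cls.PENDING
    return cls.STATE_UNSPECIFIED  # active is None: nothing reported yet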
def launch(self, name, docker_config: DockerConfig, mounts, env, blocking: bool = True):
    name = (self.prefix + 'update-' + name.lower()).replace('_', '-')

    # If we have been given a username or password for the registry, we have to
    # update it; if we haven't been, make sure it's been cleaned up in the system
    # so we don't leave passwords lying around.
    pull_secret_name = f'{name}-job-pull-secret'
    use_pull_secret = False
    try:
        # Check if there is already a username/password defined for this job
        current_pull_secret = self.api.read_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)
    except ApiException as error:
        if error.status != 404:
            raise
        current_pull_secret = None

    if docker_config.registry_username or docker_config.registry_password:
        use_pull_secret = True
        # Build the secret we want to make
        new_pull_secret = V1Secret(
            metadata=V1ObjectMeta(name=pull_secret_name, namespace=self.namespace),
            type='kubernetes.io/dockerconfigjson',
            string_data={
                '.dockerconfigjson': create_docker_auth_config(
                    image=docker_config.image,
                    username=docker_config.registry_username,
                    password=docker_config.registry_password,
                )
            }
        )
        # Send it to the server
        if current_pull_secret:
            self.api.replace_namespaced_secret(
                pull_secret_name, namespace=self.namespace,
                body=new_pull_secret, _request_timeout=API_TIMEOUT)
        else:
            self.api.create_namespaced_secret(
                namespace=self.namespace, body=new_pull_secret,
                _request_timeout=API_TIMEOUT)
    elif current_pull_secret:
        # If there is a password set in kubernetes, but not in our configuration, clear it out
        self.api.delete_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)

    try:
        self.batch_api.delete_namespaced_job(
            name=name, namespace=self.namespace,
            propagation_policy='Background', _request_timeout=API_TIMEOUT)
        while True:
            self.batch_api.read_namespaced_job(
                namespace=self.namespace, name=name, _request_timeout=API_TIMEOUT)
            time.sleep(1)
    except ApiException:
        pass

    volumes = []
    volume_mounts = []
    for index, mnt in enumerate(mounts):
        volumes.append(V1Volume(
            name=f'mount-{index}',
            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                claim_name=mnt['volume'],
                read_only=False
            ),
        ))
        volume_mounts.append(V1VolumeMount(
            name=f'mount-{index}',
            mount_path=mnt['dest_path'],
            sub_path=mnt['source_path'],
            read_only=False,
        ))

    if CONFIGURATION_CONFIGMAP:
        volumes.append(V1Volume(
            name='mount-configuration',
            config_map=V1ConfigMapVolumeSource(name=CONFIGURATION_CONFIGMAP),
        ))
        volume_mounts.append(V1VolumeMount(
            name='mount-configuration',
            mount_path='/etc/assemblyline/config.yml',
            sub_path="config",
            read_only=True,
        ))

    section = 'service'
    labels = {
        'app': 'assemblyline',
        'section': section,
        'privilege': 'core',
        'component': 'update-script',
    }
    labels.update(self.extra_labels)
    metadata = V1ObjectMeta(name=name, labels=labels)

    environment_variables = [V1EnvVar(name=_e.name, value=_e.value)
                             for _e in docker_config.environment]
    environment_variables.extend([V1EnvVar(name=k, value=v) for k, v in env.items()])
    environment_variables.extend([V1EnvVar(name=k, value=os.environ[k])
                                  for k in INHERITED_VARIABLES if k in os.environ])
    environment_variables.append(V1EnvVar(name="LOG_LEVEL", value=self.log_level))

    cores = docker_config.cpu_cores
    memory = docker_config.ram_mb
    memory_min = min(docker_config.ram_mb_min, memory)

    container = V1Container(
        name=name,
        image=docker_config.image,
        command=docker_config.command,
        env=environment_variables,
        image_pull_policy='Always',
        volume_mounts=volume_mounts,
        resources=V1ResourceRequirements(
            limits={'cpu': cores, 'memory': f'{memory}Mi'},
            requests={'cpu': cores / 4, 'memory': f'{memory_min}Mi'},
        )
    )

    pod = V1PodSpec(
        volumes=volumes,
        restart_policy='Never',
        containers=[container],
        priority_class_name=self.priority_class,
    )

    if use_pull_secret:
        pod.image_pull_secrets = [V1LocalObjectReference(name=pull_secret_name)]

    job = V1Job(
        metadata=metadata,
        spec=V1JobSpec(
            backoff_limit=1,
            completions=1,
            template=V1PodTemplateSpec(metadata=metadata, spec=pod)
        )
    )

    status = self.batch_api.create_namespaced_job(
        namespace=self.namespace, body=job, _request_timeout=API_TIMEOUT).status

    if blocking:
        try:
            while not (status.failed or status.succeeded):
                time.sleep(3)
                status = self.batch_api.read_namespaced_job(
                    namespace=self.namespace, name=name,
                    _request_timeout=API_TIMEOUT).status

            self.batch_api.delete_namespaced_job(
                name=name, namespace=self.namespace,
                propagation_policy='Background', _request_timeout=API_TIMEOUT)
        except ApiException as error:
            if error.status != 404:
                raise
def test_flush_released_pvs(api: MagicMock, batch_api: MagicMock):
    api.list_persistent_volume.return_value = V1PersistentVolumeList(
        items=[
            # don't flush because job exists
            V1PersistentVolume(metadata=V1ObjectMeta(name="pv-0")),
            # don't flush because wrong namespace
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-4"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(namespace="other"),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # don't flush because it's already done
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-5"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(namespace="namespace"),
                    persistent_volume_reclaim_policy="Delete",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # don't flush because it's in use
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-6"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # try to flush because pvc is bound but job was created after jobs were listed
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-7"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="flush-pv-7", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # flush because pvc is bound but job does not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-8"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="flush-pv-8", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # flush because pvc is not yet bound and job does not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-9"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # flush because pvc and job both do not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-A"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
        ]
    )

    def create_pvc(namespace: str, body: V1PersistentVolumeClaim):
        if body.metadata.name == "flush-pv-9":
            exc = ApiException(status=409, reason="Conflict")
            exc.body = '{"reason":"AlreadyExists"}'
            raise exc
        body.metadata.uid = "uid-" + body.metadata.name
        body.metadata.resource_version = "1"
        return body

    api.create_namespaced_persistent_volume_claim.side_effect = create_pvc

    def read_pvc(name: str, namespace: str):
        return V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(
                name=name, namespace=namespace, uid="uid-" + name, resource_version="2"
            )
        )

    api.read_namespaced_persistent_volume_claim.side_effect = read_pvc

    batch_api.list_namespaced_job.return_value = V1JobList(
        items=[V1Job(metadata=V1ObjectMeta(name="flush-pv-0"))]
    )

    def create_job(namespace, body):
        if body.metadata.name == "flush-pv-7":
            exc = ApiException(status=409, reason="Conflict")
            exc.body = '{"reason":"AlreadyExists"}'
            raise exc

    batch_api.create_namespaced_job.side_effect = create_job

    flush_released_pvs(api, batch_api, "command", "env", "image", "namespace")

    api.list_persistent_volume.assert_called_once_with()
    batch_api.list_namespaced_job.assert_called_once_with("namespace")
    assert [f"flush-pv-{i}" for i in "9A"] == [
        call.kwargs["body"].metadata.name
        for call in api.create_namespaced_persistent_volume_claim.call_args_list
    ]
    api.read_namespaced_persistent_volume_claim.assert_called_once_with(
        "flush-pv-9", "namespace"
    )
    assert [("pv-9", "flush-pv-9"), ("pv-A", "flush-pv-A")] == [
        (
            call.kwargs["name"],
            call.kwargs["body"].spec.claim_ref
            and call.kwargs["body"].spec.claim_ref.name,
        )
        for call in api.patch_persistent_volume.call_args_list
    ]
    assert [f"flush-pv-{i}" for i in "789A"] == [
        call.kwargs["body"].metadata.name
        for call in batch_api.create_namespaced_job.call_args_list
    ]
    batch_api.read_namespaced_job.assert_called_once_with("flush-pv-7", "namespace")
def test_info(self, status_p, create_p, get_p, punits_p):
    status_p.return_value = V1Job(status=V1JobStatus(
        conditions=[V1JobCondition(type='Complete', status='ready')]))
    create_p.return_value = {
        'cubegen_id': 'id',
        'status': V1JobStatus().to_dict()
    }
    get_p.return_value = {
        'cubegen_id': 'id',
        'status': 'ready',
        'output': [_OUTPUT],
        'progress': 100
    }
    punits_p.return_value = dict(punits=dict(total_count=1000), count=500)
    res = cubegens.info(user_id='drwho', email='*****@*****.**',
                        body=_CFG, token='fdsvdf')
    expected = {
        'dataset_descriptor': {
            'data_id': 'test_cube.zarr',
            'type_specifier': 'dataset',
            'crs': 'WGS84',
            'bbox': [-2.24, 51.99, -2.15, 52.05],
            'time_range': ['2020-12-01', '2021-02-28'],
            'time_period': '1D',
            'dims': {
                'time': 90,
                'lat': 674,
                'lon': 1024
            },
            'spatial_res': 8.9e-05,
            'data_vars': {
                'B02': {
                    'name': 'B02',
                    'dtype': 'float32',
                    'dims': ['time', 'lat', 'lon']
                },
                'CLM': {
                    'name': 'CLM',
                    'dtype': 'float32',
                    'dims': ['time', 'lat', 'lon']
                }
            }
        },
        'size_estimation': {
            'image_size': [1024, 674],
            'tile_size': [512, 512],
            'num_variables': 5,
            'num_tiles': [2, 1],
            'num_requests': 900,
            'num_bytes': 1242316800
        },
        'cost_estimation': {
            'required': 540,
            'available': 500,
            'limit': 1000
        }
    }
    self.assertDictEqual(expected, res)

    punits_p.return_value = dict(punits=dict(total_count=1000))
    with self.assertRaises(api.ApiError) as e:
        cubegens.info(user_id='drwho', email='*****@*****.**',
                      body=_CFG, token='fdsvdf')
    self.assertEqual(
        "Error. Cannot handle punit data. Entry 'count' is missing.",
        str(e.exception))

    punits_p.return_value = dict(punits=dict(total_count=1000), count=500)
    cfg = _CFG.copy()
    del cfg['input_config']
    with self.assertRaises(api.ApiError) as e:
        cubegens.info(user_id='drwho', email='*****@*****.**',
                      body=cfg, token='fdsvdf')
    self.assertEqual("Error. Invalid input configuration.", str(e.exception))

    cfg = _CFG.copy()
    # noinspection PyTypeChecker
    cfg['input_configs'] = [_CFG['input_config']]
    res = cubegens.info(user_id='drwho', email='*****@*****.**',
                        body=cfg, token='fdsvdf')
    self.assertEqual(expected, res)

    output = _OUTPUT.replace("Awaiting", "Awaitingxxx")
    get_p.return_value = {
        'cubegen_id': 'id',
        'status': 'ready',
        'output': [output],
        'progress': 100
    }
    with self.assertRaises(api.ApiError) as e:
        cubegens.info(user_id='drwho', email='*****@*****.**',
                      body=_CFG, token='fdsvdf')
    self.assertEqual("Expecting value: line 2 column 1 (char 1)", str(e.exception))