def test_submit_validator_passes(self):
  """Validation succeeds when every path uses a whitelisted provider."""
  empty_params = {'inputs': set(), 'outputs': set(), 'mounts': set()}
  resources = job_model.Resources(
      logging=job_model.LoggingParam('gs://buck/logs', job_model.P_GCS))
  descriptor = job_model.JobDescriptor(None, empty_params, resources,
                                       TASK_DESCRIPTORS)
  # All three provider whitelists include GCS, so this must not raise.
  param_util.validate_submit_args_or_fail(
      descriptor,
      provider_name='MYPROVIDER',
      input_providers=[job_model.P_GCS],
      output_providers=[job_model.P_GCS],
      logging_providers=[job_model.P_GCS])
def test_submit_validator_fails(self, name, path, inwl, outwl, logwl):
  """Validation raises ValueError when a path's provider is not whitelisted.

  Parameterized: `name`/`path` identify the offending argument, and
  `inwl`/`outwl`/`logwl` are the input/output/logging provider whitelists.
  """
  empty_params = {'inputs': set(), 'outputs': set(), 'mounts': set()}
  resources = job_model.Resources(
      logging=job_model.LoggingParam('gs://buck/logs', job_model.P_GCS))
  descriptor = job_model.JobDescriptor(None, empty_params, resources,
                                       TASK_DESCRIPTORS)
  expected_message = 'Unsupported %s path (%s) for provider' % (name, path)
  # re.escape: the message contains literal parentheses.
  with six.assertRaisesRegex(self, ValueError, re.escape(expected_message)):
    param_util.validate_submit_args_or_fail(
        descriptor,
        provider_name='MYPROVIDER',
        input_providers=inwl,
        output_providers=outwl,
        logging_providers=logwl)
def test_foo(self, retries, max_preemptible_attempts,
             list_of_list_of_operations, expected):
  """Exercise _wait_and_retry across a scripted sequence of operation states.

  Each element of `list_of_list_of_operations` is the full set of operations
  the fake provider should report at one point in time; the chronology
  advances one step per poll. `expected` is the anticipated return value of
  `_wait_and_retry`.
  """

  def chronology(op_batches):
    # Install each batch of operations on the fake provider, then let one
    # unit of fake time elapse before the next poll observes it.
    for batch in op_batches:
      self.provider.set_operations(batch)
      yield 1

  establish_chronology(chronology(list_of_list_of_operations))

  job_descriptor = job_model.JobDescriptor(
      job_metadata={'create-time': 123456},
      job_params=None,
      job_resources=job_model.Resources(
          logging=job_model.LoggingParam('gs://buck/logs', job_model.P_GCS),
          max_preemptible_attempts=param_util.PreemptibleParam(
              max_preemptible_attempts)),
      task_descriptors=[
          job_model.TaskDescriptor(
              task_metadata={
                  'task-id': 123,
                  'create-time': 123456
              },
              task_params=None,
              task_resources=None)
      ])

  poll_interval = 1
  ret = dsub_command._wait_and_retry(
      self.provider,
      'job-1',
      poll_interval,
      retries,
      job_descriptor,
      summary=False)

  tasks = self.provider.lookup_job_tasks({'*'})
  # First, the number of tasks returned by lookup_job_tasks should be equal
  # to the total number of task attempts.
  expected_num_of_tasks = len(list_of_list_of_operations[-1])
  self.assertEqual(len(tasks), expected_num_of_tasks)
  # Second, check that the return value of _wait_and_retry is correct.
  self.assertEqual(ret, expected)
def _make_task(job_create_time, task_create_time, task_id):
  """Build a minimal LocalTask carrying only creation-time/task-id metadata."""
  # Single task descriptor with just enough metadata for the tests.
  task_descriptor = job_model.TaskDescriptor(
      task_metadata={
          'task-id': task_id,
          'create-time': task_create_time
      },
      task_params=None,
      task_resources=None)
  descriptor = job_model.JobDescriptor(
      job_metadata={'create-time': job_create_time},
      job_params=None,
      job_resources=None,
      task_descriptors=[task_descriptor])
  # Every runtime field is left unset; only the descriptor matters here.
  return local.LocalTask(
      job_descriptor=descriptor,
      task_status=None,
      log_detail=None,
      end_time=None,
      last_update=None,
      pid=None,
      events=None)
# Canned JobDescriptor fixture: a single-task job that sets five environment
# variables (VAR1..VAR5) and declares no labels/inputs/outputs/mounts.
# Presumably used to verify env-list round-tripping through the local
# provider's metadata serialization — TODO confirm against its test.
_ENV_LIST_JOB_DESCRIPTOR = job_model.JobDescriptor(
    job_metadata={
        'dsub-version': '0.1.4',
        'job-id': 'script_env--dsubuser--171122-142837-321721',
        'job-name': 'script_env_test.sh',
        'user-id': 'dsubuser',
        'create-time': CREATE_TIME
    },
    job_resources=job_model.Resources(
        logging='gs://b/dsub/sh/local/env_list/env_list/logging/env_list.log'),
    job_params={
        'envs': {
            job_model.EnvParam('VAR1', 'VAL1'),
            job_model.EnvParam('VAR2', 'VAL2'),
            job_model.EnvParam('VAR3', 'VAL3'),
            job_model.EnvParam('VAR4', 'VAL4'),
            job_model.EnvParam('VAR5', 'VAL5'),
        },
        'labels': set(),
        'inputs': set(),
        'outputs': set(),
        'mounts': set(),
    },
    task_descriptors=[
        job_model.TaskDescriptor(
            # task-id None: this is an un-arrayed (single-task) job.
            task_metadata={'task-id': None},
            task_resources=job_model.Resources(
                logging_path=
                'gs://b/dsub/sh/local/env_list/env_list/logging/env_list.log'),
            task_params={
                'envs': set(),
                'labels': set(),
                'inputs': set(),
                'outputs': set(),
            })
    ])